author		Mark Brown <broonie@opensource.wolfsonmicro.com>	2013-04-17 09:24:35 -0400
committer	Mark Brown <broonie@opensource.wolfsonmicro.com>	2013-04-17 09:24:35 -0400
commit		753e23ea588d353da9d0a2672828336453607265 (patch)
tree		a0eb4875ecde41725fe890e27f52d69812031ea1
parent		c999836d37c6c1125e856f68877ae13952baa61a (diff)
parent		41ef2d5678d83af030125550329b6ae8b74618fa (diff)
Merge tag 'v3.9-rc7' into asoc-dma
Linux 3.9-rc7
-rw-r--r--Documentation/DocBook/device-drivers.tmpl2
-rw-r--r--Documentation/networking/ipvs-sysctl.txt7
-rw-r--r--Documentation/scsi/LICENSE.qla2xxx2
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt5
-rw-r--r--MAINTAINERS42
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/Makefile2
-rw-r--r--arch/alpha/include/asm/floppy.h2
-rw-r--r--arch/alpha/kernel/irq.c7
-rw-r--r--arch/alpha/kernel/irq_alpha.c10
-rw-r--r--arch/alpha/kernel/sys_nautilus.c5
-rw-r--r--arch/alpha/kernel/sys_titan.c14
-rw-r--r--arch/arc/include/asm/dma-mapping.h2
-rw-r--r--arch/arc/include/asm/elf.h3
-rw-r--r--arch/arc/include/asm/entry.h2
-rw-r--r--arch/arc/include/asm/irqflags.h12
-rw-r--r--arch/arc/include/asm/kgdb.h6
-rw-r--r--arch/arc/include/asm/ptrace.h2
-rw-r--r--arch/arc/include/asm/syscalls.h2
-rw-r--r--arch/arc/include/uapi/asm/ptrace.h4
-rw-r--r--arch/arc/kernel/entry.S27
-rw-r--r--arch/arc/kernel/kgdb.c1
-rw-r--r--arch/arc/kernel/setup.c4
-rw-r--r--arch/arc/kernel/sys.c2
-rw-r--r--arch/arm/Kconfig14
-rw-r--r--arch/arm/Kconfig.debug1
-rw-r--r--arch/arm/boot/dts/armada-370-mirabox.dts2
-rw-r--r--arch/arm/boot/dts/armada-370.dtsi6
-rw-r--r--arch/arm/boot/dts/dbx5x0.dtsi4
-rw-r--r--arch/arm/boot/dts/imx28-m28evk.dts1
-rw-r--r--arch/arm/boot/dts/imx28-sps1.dts1
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi1
-rw-r--r--arch/arm/boot/dts/kirkwood-goflexnet.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts14
-rw-r--r--arch/arm/boot/dts/orion5x.dtsi9
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi2
-rw-r--r--arch/arm/include/asm/delay.h2
-rw-r--r--arch/arm/include/asm/highmem.h7
-rw-r--r--arch/arm/include/asm/mmu_context.h2
-rw-r--r--arch/arm/include/asm/tlbflush.h15
-rw-r--r--arch/arm/kernel/entry-common.S12
-rw-r--r--arch/arm/kernel/head.S2
-rw-r--r--arch/arm/kernel/hw_breakpoint.c6
-rw-r--r--arch/arm/kernel/setup.c24
-rw-r--r--arch/arm/kernel/smp.c3
-rw-r--r--arch/arm/kernel/smp_tlb.c66
-rw-r--r--arch/arm/kvm/vgic.c35
-rw-r--r--arch/arm/lib/delay.c8
-rw-r--r--arch/arm/mach-cns3xxx/core.c16
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/cns3xxx.h16
-rw-r--r--arch/arm/mach-ep93xx/include/mach/uncompress.h10
-rw-r--r--arch/arm/mach-imx/clk-imx35.c2
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c3
-rw-r--r--arch/arm/mach-imx/common.h2
-rw-r--r--arch/arm/mach-imx/hotplug.c12
-rw-r--r--arch/arm/mach-imx/src.c12
-rw-r--r--arch/arm/mach-kirkwood/board-iomega_ix2_200.c7
-rw-r--r--arch/arm/mach-kirkwood/guruplug-setup.c2
-rw-r--r--arch/arm/mach-kirkwood/openrd-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/rd88f6281-setup.c1
-rw-r--r--arch/arm/mach-msm/timer.c5
-rw-r--r--arch/arm/mach-mvebu/irq-armada-370-xp.c24
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c24
-rw-r--r--arch/arm/mach-omap1/clock_data.c12
-rw-r--r--arch/arm/mach-omap2/cclock44xx_data.c20
-rw-r--r--arch/arm/mach-omap2/common.h3
-rw-r--r--arch/arm/mach-omap2/io.c18
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c7
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h9
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c7
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c6
-rw-r--r--arch/arm/mach-omap2/timer.c4
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/irqs.h4
-rw-r--r--arch/arm/mach-s3c24xx/irq.c2
-rw-r--r--arch/arm/mach-ux500/board-mop500-sdi.c1
-rw-r--r--arch/arm/mach-ux500/board-mop500.c12
-rw-r--r--arch/arm/mach-ux500/board-mop500.h1
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c5
-rw-r--r--arch/arm/mm/cache-l2x0.c11
-rw-r--r--arch/arm/mm/context.c3
-rw-r--r--arch/arm/mm/mmu.c73
-rw-r--r--arch/arm/mm/proc-v7.S19
-rw-r--r--arch/arm64/mm/mmu.c2
-rw-r--r--arch/c6x/include/asm/irqflags.h2
-rw-r--r--arch/ia64/kernel/palinfo.c77
-rw-r--r--arch/ia64/kernel/process.c5
-rw-r--r--arch/m68k/include/asm/gpio.h20
-rw-r--r--arch/mips/Kconfig7
-rw-r--r--arch/mips/bcm63xx/boards/board_bcm963xx.c5
-rw-r--r--arch/mips/bcm63xx/nvram.c7
-rw-r--r--arch/mips/bcm63xx/setup.c2
-rw-r--r--arch/mips/cavium-octeon/setup.c5
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h4
-rw-r--r--arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h4
-rw-r--r--arch/mips/include/asm/mipsregs.h209
-rw-r--r--arch/mips/include/asm/signal.h2
-rw-r--r--arch/mips/include/uapi/asm/signal.h8
-rw-r--r--arch/mips/kernel/Makefile25
-rw-r--r--arch/mips/kernel/cpu-probe.c13
-rw-r--r--arch/mips/kernel/linux32.c2
-rw-r--r--arch/mips/kernel/mcount.S11
-rw-r--r--arch/mips/kernel/proc.c2
-rw-r--r--arch/mips/kernel/traps.c2
-rw-r--r--arch/mips/lib/bitops.c16
-rw-r--r--arch/mips/lib/csum_partial.S4
-rw-r--r--arch/mips/mm/c-r4k.c6
-rw-r--r--arch/mips/mm/sc-mips.c6
-rw-r--r--arch/mips/pci/pci-alchemy.c4
-rw-r--r--arch/powerpc/kernel/epapr_paravirt.c6
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S144
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c8
-rw-r--r--arch/s390/include/asm/pgtable.h4
-rw-r--r--arch/s390/lib/uaccess_pt.c83
-rw-r--r--arch/tile/include/asm/irqflags.h10
-rw-r--r--arch/tile/kernel/setup.c25
-rw-r--r--arch/x86/boot/compressed/Makefile5
-rw-r--r--arch/x86/include/asm/paravirt.h5
-rw-r--r--arch/x86/include/asm/paravirt_types.h2
-rw-r--r--arch/x86/include/asm/syscall.h4
-rw-r--r--arch/x86/include/asm/tlb.h2
-rw-r--r--arch/x86/include/asm/xen/hypercall.h4
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c3
-rw-r--r--arch/x86/kernel/microcode_intel_early.c30
-rw-r--r--arch/x86/kernel/paravirt.c25
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/x86.c13
-rw-r--r--arch/x86/lguest/boot.c1
-rw-r--r--arch/x86/lib/usercopy_64.c4
-rw-r--r--arch/x86/mm/fault.c6
-rw-r--r--arch/x86/mm/pageattr-test.c2
-rw-r--r--arch/x86/mm/pageattr.c12
-rw-r--r--arch/x86/mm/pgtable.c7
-rw-r--r--arch/x86/xen/mmu.c16
-rw-r--r--block/blk-flush.c2
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--crypto/gcm.c17
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpi_i2c.c2
-rw-r--r--drivers/acpi/apei/cper.c2
-rw-r--r--drivers/acpi/pci_root.c85
-rw-r--r--drivers/acpi/processor_idle.c13
-rw-r--r--drivers/acpi/sleep.c8
-rw-r--r--drivers/ata/ata_piix.c14
-rw-r--r--drivers/ata/libata-core.c6
-rw-r--r--drivers/ata/libata-scsi.c8
-rw-r--r--drivers/base/power/qos.c60
-rw-r--r--drivers/base/regmap/regcache-rbtree.c2
-rw-r--r--drivers/base/regmap/regmap.c6
-rw-r--r--drivers/block/Kconfig4
-rw-r--r--drivers/block/aoe/aoecmd.c3
-rw-r--r--drivers/block/cciss.c2
-rw-r--r--drivers/block/loop.c10
-rw-r--r--drivers/block/mg_disk.c4
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c331
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h18
-rw-r--r--drivers/block/rbd.c47
-rw-r--r--drivers/block/rsxx/Makefile2
-rw-r--r--drivers/block/rsxx/config.c8
-rw-r--r--drivers/block/rsxx/core.c237
-rw-r--r--drivers/block/rsxx/cregs.c112
-rw-r--r--drivers/block/rsxx/dma.c239
-rw-r--r--drivers/block/rsxx/rsxx.h6
-rw-r--r--drivers/block/rsxx/rsxx_cfg.h2
-rw-r--r--drivers/block/rsxx/rsxx_priv.h34
-rw-r--r--drivers/block/xen-blkback/blkback.c68
-rw-r--r--drivers/block/xen-blkback/common.h40
-rw-r--r--drivers/block/xen-blkback/xenbus.c14
-rw-r--r--drivers/block/xen-blkfront.c154
-rw-r--r--drivers/bluetooth/ath3k.c4
-rw-r--r--drivers/bluetooth/btusb.c2
-rw-r--r--drivers/char/hw_random/core.c9
-rw-r--r--drivers/char/virtio_console.c44
-rw-r--r--drivers/clk/tegra/clk-tegra20.c2
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c10
-rw-r--r--drivers/cpufreq/cpufreq_governor.h6
-rw-r--r--drivers/cpufreq/cpufreq_stats.c12
-rw-r--r--drivers/cpufreq/intel_pstate.c35
-rw-r--r--drivers/crypto/caam/caamalg.c27
-rw-r--r--drivers/crypto/caam/compat.h1
-rw-r--r--drivers/crypto/talitos.c30
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c2
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/dw_dmac.c23
-rw-r--r--drivers/dma/dw_dmac_regs.h1
-rw-r--r--drivers/dma/omap-dma.c20
-rw-r--r--drivers/dma/pl330.c38
-rw-r--r--drivers/eisa/pci_eisa.c67
-rw-r--r--drivers/extcon/extcon-max77693.c103
-rw-r--r--drivers/extcon/extcon-max8997.c56
-rw-r--r--drivers/firmware/Kconfig18
-rw-r--r--drivers/firmware/efivars.c150
-rw-r--r--drivers/gpio/gpio-ich.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-stmpe.c15
-rw-r--r--drivers/gpio/gpiolib-of.c5
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c8
-rw-r--r--drivers/gpu/drm/drm_fops.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c21
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c370
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c21
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c40
-rw-r--r--drivers/gpu/drm/i915/intel_display.c10
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c3
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c13
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c13
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c26
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c4
-rw-r--r--drivers/hid/hid-core.c13
-rw-r--r--drivers/hid/hid-ids.h11
-rw-r--r--drivers/hid/hid-magicmouse.c29
-rw-r--r--drivers/hid/hid-multitouch.c6
-rw-r--r--drivers/hid/usbhid/hid-quirks.c2
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/Kconfig6
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c8
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/amd_iommu.c22
-rw-r--r--drivers/iommu/amd_iommu_init.c2
-rw-r--r--drivers/iommu/irq_remapping.c1
-rw-r--r--drivers/md/dm-cache-target.c51
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c20
-rw-r--r--drivers/media/platform/Kconfig2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c8
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-core.c6
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-lite-reg.c8
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-lite.c1
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-mdevice.c39
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c1
-rw-r--r--drivers/media/radio/radio-ma901.c11
-rw-r--r--drivers/media/rc/Kconfig2
-rw-r--r--drivers/media/v4l2-core/Makefile2
-rw-r--r--drivers/misc/mei/hw-me.c29
-rw-r--r--drivers/misc/mei/init.c18
-rw-r--r--drivers/misc/mei/mei_dev.h1
-rw-r--r--drivers/misc/mei/pci-me.c52
-rw-r--r--drivers/misc/vmw_vmci/Kconfig2
-rw-r--r--drivers/misc/vmw_vmci/vmci_datagram.c4
-rw-r--r--drivers/net/bonding/bond_main.c12
-rw-r--r--drivers/net/bonding/bond_sysfs.c97
-rw-r--r--drivers/net/can/sja1000/Kconfig1
-rw-r--r--drivers/net/can/sja1000/plx_pci.c4
-rw-r--r--drivers/net/can/sja1000/sja1000.c6
-rw-r--r--drivers/net/can/sja1000/sja1000.h2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e.h3
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c3
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c7
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c9
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c214
-rw-r--r--drivers/net/ethernet/davicom/dm9000.h11
-rw-r--r--drivers/net/ethernet/freescale/fec.c86
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c3
-rw-r--r--drivers/net/ethernet/intel/e100.c36
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c14
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c7
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c33
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c24
-rw-r--r--drivers/net/ethernet/lantiq_etop.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c9
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c45
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c2
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.c28
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c38
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h3
-rw-r--r--drivers/net/ethernet/ti/cpsw.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c2
-rw-r--r--drivers/net/hyperv/netvsc.c17
-rw-r--r--drivers/net/hyperv/netvsc_drv.c2
-rw-r--r--drivers/net/hyperv/rndis_filter.c14
-rw-r--r--drivers/net/usb/smsc75xx.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/b43/dma.c65
-rw-r--r--drivers/net/wireless/b43/phy_n.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c46
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c369
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c64
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c22
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c9
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c18
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c4
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c13
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c3
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c22
-rw-r--r--drivers/net/wireless/mwifiex/init.c8
-rw-r--r--drivers/net/wireless/mwifiex/main.h4
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c1
-rw-r--r--drivers/net/wireless/mwifiex/scan.c17
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c10
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig7
-rw-r--r--drivers/net/wireless/rt2x00/Makefile1
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mmio.c216
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mmio.h119
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c176
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h88
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c1
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c1
-rw-r--r--drivers/nfc/microread/mei.c38
-rw-r--r--drivers/pci/pci-acpi.c15
-rw-r--r--drivers/pci/pci-driver.c5
-rw-r--r--drivers/pci/pcie/portdrv_pci.c13
-rw-r--r--drivers/pci/rom.c67
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c2
-rw-r--r--drivers/pinctrl/pinconf.c2
-rw-r--r--drivers/pinctrl/pinconf.h2
-rw-r--r--drivers/pinctrl/pinctrl-abx500.c2
-rw-r--r--drivers/pinctrl/pinmux.c5
-rw-r--r--drivers/remoteproc/Kconfig2
-rw-r--r--drivers/remoteproc/remoteproc_core.c6
-rw-r--r--drivers/remoteproc/ste_modem_rproc.c7
-rw-r--r--drivers/rtc/rtc-at91rm9200.c50
-rw-r--r--drivers/rtc/rtc-at91rm9200.h1
-rw-r--r--drivers/s390/block/scm_blk.c11
-rw-r--r--drivers/s390/block/scm_drv.c2
-rw-r--r--drivers/s390/char/tty3270.c16
-rw-r--r--drivers/s390/net/qeth_core.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c19
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c6
-rw-r--r--drivers/scsi/fcoe/fcoe.c15
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c60
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/ipr.c13
-rw-r--r--drivers/scsi/libfc/fc_disc.c26
-rw-r--r--drivers/scsi/libsas/sas_expander.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c58
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/st.c8
-rw-r--r--drivers/spi/Kconfig3
-rw-r--r--drivers/spi/spi-bcm63xx.c3
-rw-r--r--drivers/spi/spi-mpc512x-psc.c2
-rw-r--r--drivers/spi/spi-pxa2xx.c1
-rw-r--r--drivers/spi/spi-s3c64xx.c41
-rw-r--r--drivers/spi/spi-tegra20-slink.c25
-rw-r--r--drivers/spi/spi.c17
-rw-r--r--drivers/staging/comedi/drivers/s626.c2
-rw-r--r--drivers/staging/zcache/Kconfig2
-rw-r--r--drivers/target/target_core_alua.c3
-rw-r--r--drivers/target/target_core_transport.c4
-rw-r--r--drivers/tty/mxser.c8
-rw-r--r--drivers/tty/serial/8250/8250_core.c (renamed from drivers/tty/serial/8250/8250.c)6
-rw-r--r--drivers/tty/serial/8250/8250_pci.c13
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c12
-rw-r--r--drivers/tty/serial/8250/Kconfig17
-rw-r--r--drivers/tty/serial/8250/Makefile8
-rw-r--r--drivers/tty/serial/atmel_serial.c11
-rw-r--r--drivers/tty/serial/omap-serial.c11
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/vt/vc_screen.c6
-rw-r--r--drivers/usb/core/hcd.c8
-rw-r--r--drivers/usb/core/port.c1
-rw-r--r--drivers/usb/core/usb-acpi.c8
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/host/ehci-sched.c2
-rw-r--r--drivers/usb/host/xhci-mem.c36
-rw-r--r--drivers/usb/host/xhci-pci.c1
-rw-r--r--drivers/usb/host/xhci-ring.c61
-rw-r--r--drivers/usb/host/xhci.c22
-rw-r--r--drivers/usb/host/xhci.h5
-rw-r--r--drivers/usb/phy/Kconfig1
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/usb-serial.c1
-rw-r--r--drivers/vfio/pci/vfio_pci.c3
-rw-r--r--drivers/vhost/tcm_vhost.c209
-rw-r--r--drivers/video/fbmon.c2
-rw-r--r--drivers/video/mxsfb.c7
-rw-r--r--drivers/video/omap/omapfb_main.c2
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c13
-rw-r--r--drivers/video/omap2/dss/dss_features.c6
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c1
-rw-r--r--drivers/video/uvesafb.c3
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/events.c39
-rw-r--r--drivers/xen/fallback.c3
-rw-r--r--drivers/xen/xen-acpi-processor.c3
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c59
-rw-r--r--fs/block_dev.c1
-rw-r--r--fs/btrfs/ctree.c30
-rw-r--r--fs/btrfs/disk-io.c14
-rw-r--r--fs/btrfs/extent-tree.c84
-rw-r--r--fs/btrfs/extent_io.c33
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/file-item.c6
-rw-r--r--fs/btrfs/file.c9
-rw-r--r--fs/btrfs/inode.c22
-rw-r--r--fs/btrfs/ordered-data.c2
-rw-r--r--fs/btrfs/qgroup.c3
-rw-r--r--fs/btrfs/scrub.c3
-rw-r--r--fs/btrfs/send.c10
-rw-r--r--fs/btrfs/tree-log.c48
-rw-r--r--fs/btrfs/volumes.c13
-rw-r--r--fs/cifs/connect.c16
-rw-r--r--fs/dcache.c16
-rw-r--r--fs/ecryptfs/miscdev.c14
-rw-r--r--fs/ext4/extents.c11
-rw-r--r--fs/ext4/indirect.c4
-rw-r--r--fs/gfs2/file.c5
-rw-r--r--fs/gfs2/incore.h1
-rw-r--r--fs/gfs2/lock_dlm.c39
-rw-r--r--fs/gfs2/rgrp.c32
-rw-r--r--fs/inode.c2
-rw-r--r--fs/internal.h5
-rw-r--r--fs/namespace.c56
-rw-r--r--fs/nfs/blocklayout/blocklayoutdm.c4
-rw-r--r--fs/nfs/idmap.c13
-rw-r--r--fs/nfs/nfs4client.c45
-rw-r--r--fs/nfs/nfs4filelayout.c1
-rw-r--r--fs/nfs/nfs4proc.c17
-rw-r--r--fs/nfs/nfs4state.c8
-rw-r--r--fs/nfs/pnfs.c81
-rw-r--r--fs/nfs/pnfs.h6
-rw-r--r--fs/nfsd/nfs4xdr.c2
-rw-r--r--fs/nfsd/nfscache.c11
-rw-r--r--fs/nfsd/vfs.c3
-rw-r--r--fs/pnode.c6
-rw-r--r--fs/pnode.h1
-rw-r--r--fs/proc/generic.c119
-rw-r--r--fs/proc/root.c4
-rw-r--r--fs/read_write.c28
-rw-r--r--fs/reiserfs/xattr.c4
-rw-r--r--fs/splice.c4
-rw-r--r--fs/sysfs/dir.c17
-rw-r--r--fs/sysfs/mount.c4
-rw-r--r--fs/ubifs/super.c12
-rw-r--r--include/asm-generic/tlb.h7
-rw-r--r--include/linux/ata.h2
-rw-r--r--include/linux/capability.h2
-rw-r--r--include/linux/compat.h4
-rw-r--r--include/linux/debug_locks.h4
-rw-r--r--include/linux/devfreq.h16
-rw-r--r--include/linux/freezer.h3
-rw-r--r--include/linux/fs_struct.h2
-rw-r--r--include/linux/ftrace.h5
-rw-r--r--include/linux/kvm_host.h2
-rw-r--r--include/linux/kvm_types.h1
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/mfd/max77693-private.h23
-rw-r--r--include/linux/mm.h1
-rw-r--r--include/linux/mman.h4
-rw-r--r--include/linux/mount.h2
-rw-r--r--include/linux/mxsfb.h7
-rw-r--r--include/linux/netdevice.h4
-rw-r--r--include/linux/pci.h1
-rw-r--r--include/linux/preempt.h22
-rw-r--r--include/linux/proc_fs.h2
-rw-r--r--include/linux/security.h12
-rw-r--r--include/linux/signal.h4
-rw-r--r--include/linux/skbuff.h7
-rw-r--r--include/linux/spinlock_up.h29
-rw-r--r--include/linux/thermal.h2
-rw-r--r--include/linux/udp.h1
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/linux/user_namespace.h4
-rw-r--r--include/net/flow_keys.h1
-rw-r--r--include/net/ip_vs.h12
-rw-r--r--include/net/ipip.h16
-rw-r--r--include/net/iucv/af_iucv.h8
-rw-r--r--include/scsi/libfc.h3
-rw-r--r--[-rwxr-xr-x]include/sound/max98090.h0
-rw-r--r--include/sound/soc-dapm.h1
-rw-r--r--include/uapi/linux/packet_diag.h4
-rw-r--r--include/uapi/linux/unix_diag.h4
-rw-r--r--include/xen/interface/io/blkif.h10
-rw-r--r--include/xen/interface/physdev.h6
-rw-r--r--ipc/mqueue.c12
-rw-r--r--ipc/msg.c1
-rw-r--r--kernel/capability.c24
-rw-r--r--kernel/events/core.c4
-rw-r--r--kernel/events/internal.h2
-rw-r--r--kernel/events/ring_buffer.c22
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/lockdep.c17
-rw-r--r--kernel/pid_namespace.c3
-rw-r--r--kernel/sched/clock.c26
-rw-r--r--kernel/sched/core.c8
-rw-r--r--kernel/sched/cputime.c2
-rw-r--r--kernel/sys.c3
-rw-r--r--kernel/time/tick-broadcast.c3
-rw-r--r--kernel/trace/ftrace.c54
-rw-r--r--kernel/trace/trace.c9
-rw-r--r--kernel/trace/trace_stack.c2
-rw-r--r--kernel/user.c2
-rw-r--r--kernel/user_namespace.c11
-rw-r--r--lib/kobject.c9
-rw-r--r--mm/fremap.c12
-rw-r--r--mm/memory.c1
-rw-r--r--mm/mlock.c11
-rw-r--r--mm/mmap.c6
-rw-r--r--mm/nommu.c2
-rw-r--r--net/8021q/vlan.c14
-rw-r--r--net/atm/common.c2
-rw-r--r--net/ax25/af_ax25.c1
-rw-r--r--net/bluetooth/af_bluetooth.c4
-rw-r--r--net/bluetooth/rfcomm/sock.c1
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bridge/br_fdb.c2
-rw-r--r--net/caif/caif_socket.c2
-rw-r--r--net/can/gw.c6
-rw-r--r--net/core/dev.c9
-rw-r--r--net/core/dev_addr_lists.c6
-rw-r--r--net/core/flow.c2
-rw-r--r--net/core/flow_dissector.c2
-rw-r--r--net/core/rtnetlink.c8
-rw-r--r--net/core/scm.c4
-rw-r--r--net/ipv4/af_inet.c3
-rw-r--r--net/ipv4/devinet.c66
-rw-r--r--net/ipv4/ipconfig.c3
-rw-r--r--net/ipv4/netfilter/Kconfig13
-rw-r--r--net/ipv4/tcp_input.c7
-rw-r--r--net/ipv4/tcp_output.c8
-rw-r--r--net/ipv4/udp.c7
-rw-r--r--net/ipv6/addrconf.c53
-rw-r--r--net/ipv6/ip6_input.c12
-rw-r--r--net/ipv6/netfilter/ip6t_NPT.c4
-rw-r--r--net/ipv6/tcp_ipv6.c1
-rw-r--r--net/ipv6/udp.c8
-rw-r--r--net/irda/af_irda.c8
-rw-r--r--net/iucv/af_iucv.c36
-rw-r--r--net/key/af_key.c1
-rw-r--r--net/l2tp/l2tp_core.c206
-rw-r--r--net/l2tp/l2tp_core.h22
-rw-r--r--net/l2tp/l2tp_debugfs.c28
-rw-r--r--net/l2tp/l2tp_ip.c6
-rw-r--r--net/l2tp/l2tp_ip6.c8
-rw-r--r--net/l2tp/l2tp_netlink.c72
-rw-r--r--net/l2tp/l2tp_ppp.c111
-rw-r--r--net/llc/af_llc.c2
-rw-r--r--net/mac80211/cfg.c6
-rw-r--r--net/mac80211/chan.c17
-rw-r--r--net/mac80211/ieee80211_i.h4
-rw-r--r--net/mac80211/iface.c37
-rw-r--r--net/mac80211/mesh.c3
-rw-r--r--net/mac80211/mlme.c6
-rw-r--r--net/mac80211/offchannel.c23
-rw-r--r--net/mac80211/rx.c14
-rw-r--r--net/mac80211/sta_info.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c16
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c12
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c12
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c12
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c12
-rw-r--r--net/netfilter/nf_conntrack_standalone.c1
-rw-r--r--net/netfilter/nfnetlink_acct.c2
-rw-r--r--net/netfilter/nfnetlink_queue_core.c6
-rw-r--r--net/netlink/genetlink.c1
-rw-r--r--net/netrom/af_netrom.c1
-rw-r--r--net/nfc/llcp/llcp.c8
-rw-r--r--net/nfc/llcp/sock.c9
-rw-r--r--net/rose/af_rose.c1
-rw-r--r--net/sched/sch_cbq.c5
-rw-r--r--net/sched/sch_fq_codel.c2
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sunrpc/clnt.c11
-rw-r--r--net/sunrpc/sched.c9
-rw-r--r--net/tipc/socket.c7
-rw-r--r--net/unix/af_unix.c9
-rw-r--r--net/vmw_vsock/af_vsock.c8
-rw-r--r--net/vmw_vsock/vmci_transport.c34
-rw-r--r--net/vmw_vsock/vsock_addr.c10
-rw-r--r--net/vmw_vsock/vsock_addr.h2
-rw-r--r--net/wireless/core.c64
-rw-r--r--net/wireless/core.h3
-rw-r--r--net/wireless/nl80211.c52
-rw-r--r--net/wireless/scan.c24
-rw-r--r--net/wireless/sme.c8
-rw-r--r--net/wireless/trace.h5
-rw-r--r--net/wireless/wext-sme.c6
-rw-r--r--net/xfrm/xfrm_replay.c66
-rw-r--r--security/capability.c6
-rw-r--r--security/security.c5
-rw-r--r--security/selinux/hooks.c7
-rw-r--r--security/yama/yama_lsm.c4
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_eld.c2
-rw-r--r--sound/pci/hda/hda_generic.c2
-rw-r--r--sound/pci/hda/hda_intel.c6
-rw-r--r--sound/pci/hda/patch_hdmi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c4
-rw-r--r--[-rwxr-xr-x]sound/soc/codecs/max98090.c0
-rw-r--r--[-rwxr-xr-x]sound/soc/codecs/max98090.h0
-rw-r--r--sound/soc/codecs/si476x.c1
-rw-r--r--sound/soc/codecs/wm5102.c2
-rw-r--r--sound/soc/codecs/wm8903.c2
-rw-r--r--sound/soc/codecs/wm_adsp.c5
-rw-r--r--sound/soc/fsl/imx-ssi.c5
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c2
-rw-r--r--sound/soc/samsung/i2s.c17
-rw-r--r--sound/soc/sh/dma-sh7760.c4
-rw-r--r--sound/soc/soc-compress.c14
-rw-r--r--sound/soc/soc-core.c10
-rw-r--r--sound/soc/soc-dapm.c14
-rw-r--r--sound/soc/spear/spear_pcm.c12
-rw-r--r--sound/soc/tegra/tegra_pcm.c24
-rw-r--r--sound/usb/clock.c45
-rw-r--r--sound/usb/mixer_quirks.c4
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--virt/kvm/kvm_main.c47
655 files changed, 7253 insertions, 3999 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 7514dbf0a679..c36892c072da 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -227,7 +227,7 @@ X!Isound/sound_firmware.c
     <chapter id="uart16x50">
      <title>16x50 UART Driver</title>
 !Edrivers/tty/serial/serial_core.c
-!Edrivers/tty/serial/8250/8250.c
+!Edrivers/tty/serial/8250/8250_core.c
      </chapter>
 
     <chapter id="fbdev">
diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt
index f2a2488f1bf3..9573d0c48c6e 100644
--- a/Documentation/networking/ipvs-sysctl.txt
+++ b/Documentation/networking/ipvs-sysctl.txt
@@ -15,6 +15,13 @@ amemthresh - INTEGER
 	enabled and the variable is automatically set to 2, otherwise
 	the strategy is disabled and the variable is set to 1.
 
+backup_only - BOOLEAN
+	0 - disabled (default)
+	not 0 - enabled
+
+	If set, disable the director function while the server is
+	in backup mode to avoid packet loops for DR/TUN methods.
+
 conntrack - BOOLEAN
 	0 - disabled (default)
 	not 0 - enabled
diff --git a/Documentation/scsi/LICENSE.qla2xxx b/Documentation/scsi/LICENSE.qla2xxx
index 27a91cf43d6d..5020b7b5a244 100644
--- a/Documentation/scsi/LICENSE.qla2xxx
+++ b/Documentation/scsi/LICENSE.qla2xxx
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2012 QLogic Corporation
+Copyright (c) 2003-2013 QLogic Corporation
 QLogic Linux FC-FCoE Driver
 
 This program includes a device driver for Linux 3.x.
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 4499bd948860..95731a08f257 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -890,9 +890,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
     enable_msi		- Enable Message Signaled Interrupt (MSI) (default = off)
     power_save		- Automatic power-saving timeout (in second, 0 =
 			disable)
-    power_save_controller - Support runtime D3 of HD-audio controller
-			(-1 = on for supported chip (default), false = off,
-			 true = force to on even for unsupported hardware)
+    power_save_controller - Reset HD-audio controller in power-saving mode
+			(default = on)
     align_buffer_size	- Force rounding of buffer/period sizes to multiples
 			  of 128 bytes. This is more efficient in terms of memory
 			  access but isn't required by the HDA spec and prevents
diff --git a/MAINTAINERS b/MAINTAINERS
index 4cf5fd334a06..8bdd7a7ef2f4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3242,6 +3242,12 @@ F: Documentation/firmware_class/
 F:	drivers/base/firmware*.c
 F:	include/linux/firmware.h
 
+FLASHSYSTEM DRIVER (IBM FlashSystem 70/80 PCI SSD Flash Card)
+M:	Joshua Morris <josh.h.morris@us.ibm.com>
+M:	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+S:	Maintained
+F:	drivers/block/rsxx/
+
 FLOPPY DRIVER
 M:	Jiri Kosina <jkosina@suse.cz>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
@@ -4935,6 +4941,12 @@ W: logfs.org
 S:	Maintained
 F:	fs/logfs/
 
+LPC32XX MACHINE SUPPORT
+M:	Roland Stigge <stigge@antcom.de>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/mach-lpc32xx/
+
 LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
 M:	Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
 M:	Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
@@ -5059,9 +5071,8 @@ S: Maintained
 F:	drivers/net/ethernet/marvell/sk*
 
 MARVELL LIBERTAS WIRELESS DRIVER
-M:	Dan Williams <dcbw@redhat.com>
 L:	libertas-dev@lists.infradead.org
-S:	Maintained
+S:	Orphan
 F:	drivers/net/wireless/libertas/
 
 MARVELL MV643XX ETHERNET DRIVER
@@ -5563,6 +5574,7 @@ F: include/uapi/linux/if_*
 F:	include/uapi/linux/netdevice.h
 
 NETXEN (1/10) GbE SUPPORT
+M:	Manish Chopra <manish.chopra@qlogic.com>
 M:	Sony Chacko <sony.chacko@qlogic.com>
 M:	Rajesh Borundia <rajesh.borundia@qlogic.com>
 L:	netdev@vger.kernel.org
@@ -5683,7 +5695,7 @@ S: Maintained
 F:	arch/arm/*omap*/*clock*
 
 OMAP POWER MANAGEMENT SUPPORT
-M:	Kevin Hilman <khilman@ti.com>
+M:	Kevin Hilman <khilman@deeprootsystems.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
 F:	arch/arm/*omap*/*pm*
@@ -5777,7 +5789,7 @@ F: arch/arm/*omap*/usb*
 
 OMAP GPIO DRIVER
 M:	Santosh Shilimkar <santosh.shilimkar@ti.com>
-M:	Kevin Hilman <khilman@ti.com>
+M:	Kevin Hilman <khilman@deeprootsystems.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
 F:	drivers/gpio/gpio-omap.c
@@ -6209,7 +6221,7 @@ F: include/linux/power_supply.h
 F:	drivers/power/
 
 PNP SUPPORT
-M:	Adam Belay <abelay@mit.edu>
+M:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 M:	Bjorn Helgaas <bhelgaas@google.com>
 S:	Maintained
 F:	drivers/pnp/
@@ -6551,12 +6563,6 @@ S: Maintained
 F:	Documentation/blockdev/ramdisk.txt
 F:	drivers/block/brd.c
 
-RAMSAM DRIVER (IBM RamSan 70/80 PCI SSD Flash Card)
-M:	Joshua Morris <josh.h.morris@us.ibm.com>
-M:	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-S:	Maintained
-F:	drivers/block/rsxx/
-
 RANDOM NUMBER DRIVER
 M:	Theodore Ts'o" <tytso@mit.edu>
 S:	Maintained
@@ -6625,7 +6631,7 @@ S: Supported
 F:	fs/reiserfs/
 
 REGISTER MAP ABSTRACTION
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:	Mark Brown <broonie@kernel.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git
 S:	Supported
 F:	drivers/base/regmap/
@@ -6951,7 +6957,6 @@ F: drivers/scsi/st*
 
 SCTP PROTOCOL
 M:	Vlad Yasevich <vyasevich@gmail.com>
-M:	Sridhar Samudrala <sri@us.ibm.com>
 M:	Neil Horman <nhorman@tuxdriver.com>
 L:	linux-sctp@vger.kernel.org
 W:	http://lksctp.sourceforge.net
@@ -7173,7 +7178,7 @@ F: arch/arm/mach-s3c2410/bast-irq.c
 
 TI DAVINCI MACHINE SUPPORT
 M:	Sekhar Nori <nsekhar@ti.com>
-M:	Kevin Hilman <khilman@ti.com>
+M:	Kevin Hilman <khilman@deeprootsystems.com>
 L:	davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
 T:	git git://gitorious.org/linux-davinci/linux-davinci.git
 Q:	http://patchwork.kernel.org/project/linux-davinci/list/
@@ -7374,7 +7379,7 @@ F: sound/
 
 SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
 M:	Liam Girdwood <lgirdwood@gmail.com>
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:	Mark Brown <broonie@kernel.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:	http://alsa-project.org/main/index.php/ASoC
@@ -7463,7 +7468,7 @@ F: drivers/clk/spear/
 
 SPI SUBSYSTEM
 M:	Grant Likely <grant.likely@secretlab.ca>
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:	Mark Brown <broonie@kernel.org>
 L:	spi-devel-general@lists.sourceforge.net
 Q:	http://patchwork.kernel.org/project/spi-devel-general/list/
 T:	git git://git.secretlab.ca/git/linux-2.6.git
@@ -7706,9 +7711,10 @@ F: include/linux/swiotlb.h
 
 SYNOPSYS ARC ARCHITECTURE
 M:	Vineet Gupta <vgupta@synopsys.com>
-L:	linux-snps-arc@vger.kernel.org
 S:	Supported
 F:	arch/arc/
+F:	Documentation/devicetree/bindings/arc/
+F:	drivers/tty/serial/arc-uart.c
 
 SYSV FILESYSTEM
 M:	Christoph Hellwig <hch@infradead.org>
@@ -8707,7 +8713,7 @@ F: drivers/scsi/vmw_pvscsi.h
 
 VOLTAGE AND CURRENT REGULATOR FRAMEWORK
 M:	Liam Girdwood <lrg@ti.com>
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:	Mark Brown <broonie@kernel.org>
 W:	http://opensource.wolfsonmicro.com/node/15
 W:	http://www.slimlogic.co.uk/?p=48
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git
diff --git a/Makefile b/Makefile
index 54d2b2a0fef0..9cf6783c2ec3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile
index 4759fe751aa1..2cc3cc519c54 100644
--- a/arch/alpha/Makefile
+++ b/arch/alpha/Makefile
@@ -12,7 +12,7 @@ NM := $(NM) -B
 
 LDFLAGS_vmlinux := -static -N #-relax
 CHECKFLAGS += -D__alpha__ -m64
-cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data
+cflags-y := -pipe -mno-fp-regs -ffixed-8
 cflags-y += $(call cc-option, -fno-jump-tables)
 
 cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4
diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h
index 46cefbd50e73..bae97eb19d26 100644
--- a/arch/alpha/include/asm/floppy.h
+++ b/arch/alpha/include/asm/floppy.h
@@ -26,7 +26,7 @@
 #define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
 #define fd_cacheflush(addr,size) /* nothing */
 #define fd_request_irq()        request_irq(FLOPPY_IRQ, floppy_interrupt,\
-						IRQF_DISABLED, "floppy", NULL)
+						0, "floppy", NULL)
 #define fd_free_irq()           free_irq(FLOPPY_IRQ, NULL)
 
 #ifdef CONFIG_PCI
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 2872accd2215..7b2be251c30f 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -117,13 +117,6 @@ handle_irq(int irq)
 		return;
 	}
 
-	/*
-	 * From here we must proceed with IPL_MAX.  Note that we do not
-	 * explicitly enable interrupts afterwards - some MILO PALcode
-	 * (namely LX164 one) seems to have severe problems with RTI
-	 * at IPL 0.
-	 */
-	local_irq_disable();
 	irq_enter();
 	generic_handle_irq_desc(irq, desc);
 	irq_exit();
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 772ddfdb71a8..f433fc11877a 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -45,6 +45,14 @@ do_entInt(unsigned long type, unsigned long vector,
 	  unsigned long la_ptr, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
+
+	/*
+	 * Disable interrupts during IRQ handling.
+	 * Note that there is no matching local_irq_enable() due to
+	 * severe problems with RTI at IPL0 and some MILO PALcode
+	 * (namely LX164).
+	 */
+	local_irq_disable();
 	switch (type) {
 	case 0:
 #ifdef CONFIG_SMP
@@ -62,7 +70,6 @@ do_entInt(unsigned long type, unsigned long vector,
 	  {
 		long cpu;
 
-		local_irq_disable();
 		smp_percpu_timer_interrupt(regs);
 		cpu = smp_processor_id();
 		if (cpu != boot_cpuid) {
@@ -222,7 +229,6 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,
 
 struct irqaction timer_irqaction = {
 	.handler	= timer_interrupt,
-	.flags		= IRQF_DISABLED,
 	.name		= "timer",
 };
 
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 4d4c046f708d..1383f8601a93 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -188,6 +188,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
 extern void free_reserved_mem(void *, void *);
 extern void pcibios_claim_one_bus(struct pci_bus *);
 
+static struct resource irongate_io = {
+	.name	= "Irongate PCI IO",
+	.flags	= IORESOURCE_IO,
+};
 static struct resource irongate_mem = {
 	.name	= "Irongate PCI MEM",
 	.flags	= IORESOURCE_MEM,
@@ -209,6 +213,7 @@ nautilus_init_pci(void)
 
 	irongate = pci_get_bus_and_slot(0, 0);
 	bus->self = irongate;
+	bus->resource[0] = &irongate_io;
 	bus->resource[1] = &irongate_mem;
 
 	pci_bus_size_bridges(bus);
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 5cf4a481b8c5..a53cf03f49d5 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -280,15 +280,15 @@ titan_late_init(void)
 	 * all reported to the kernel as machine checks, so the handler
 	 * is a nop so it can be called to count the individual events.
 	 */
-	titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(63+16, titan_intr_nop, 0,
 		    "CChip Error", NULL);
-	titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(62+16, titan_intr_nop, 0,
 		    "PChip 0 H_Error", NULL);
-	titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(61+16, titan_intr_nop, 0,
 		    "PChip 1 H_Error", NULL);
-	titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(60+16, titan_intr_nop, 0,
 		    "PChip 0 C_Error", NULL);
-	titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(59+16, titan_intr_nop, 0,
 		    "PChip 1 C_Error", NULL);
 
 	/*
@@ -348,9 +348,9 @@ privateer_init_pci(void)
 	 * Hook a couple of extra err interrupts that the
 	 * common titan code won't.
 	 */
-	titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(53+16, titan_intr_nop, 0,
 		    "NMI", NULL);
-	titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(50+16, titan_intr_nop, 0,
 		    "Temperature Warning", NULL);
 
 	/*
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 31f77aec0823..45b8e0cea176 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		sg->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
 					       s->length, dir);
 
 	return nents;
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index f4c8d36ebecb..a26282857683 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *);
  */
 #define ELF_PLATFORM	(NULL)
 
-#define SET_PERSONALITY(ex) \
-	set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 #endif
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 23daa326fc9b..eb2ae53187d9 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -415,7 +415,7 @@
  *-------------------------------------------------------------*/
 .macro SAVE_ALL_EXCEPTION   marker
 
-	st      \marker, [sp, 8]
+	st      \marker, [sp, 8]	/* orig_r8 */
 	st      r0, [sp, 4]    /* orig_r0, needed only for sys calls */
 
 	/* Restore r9 used to code the early prologue */
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index ccd84806b62f..eac071668201 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void)
39 " flag.nz %0 \n" 39 " flag.nz %0 \n"
40 : "=r"(temp), "=r"(flags) 40 : "=r"(temp), "=r"(flags)
41 : "n"((STATUS_E1_MASK | STATUS_E2_MASK)) 41 : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
42 : "cc"); 42 : "memory", "cc");
43 43
44 return flags; 44 return flags;
45} 45}
@@ -53,7 +53,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
 	__asm__ __volatile__(
 	"	flag %0			\n"
 	:
-	: "r"(flags));
+	: "r"(flags)
+	: "memory");
 }
 
 /*
@@ -73,7 +74,8 @@ static inline void arch_local_irq_disable(void)
73 " and %0, %0, %1 \n" 74 " and %0, %0, %1 \n"
74 " flag %0 \n" 75 " flag %0 \n"
75 : "=&r"(temp) 76 : "=&r"(temp)
76 : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))); 77 : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
78 : "memory");
77} 79}
78 80
79/* 81/*
@@ -85,7 +87,9 @@ static inline long arch_local_save_flags(void)
 
 	__asm__ __volatile__(
 	"	lr  %0, [status32]	\n"
-	: "=&r"(temp));
+	: "=&r"(temp)
+	:
+	: "memory");
 
 	return temp;
 }
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
index f3c4934f0ca9..4930957ca3d3 100644
--- a/arch/arc/include/asm/kgdb.h
+++ b/arch/arc/include/asm/kgdb.h
@@ -13,7 +13,7 @@
 
 #ifdef CONFIG_KGDB
 
-#include <asm/user.h>
+#include <asm/ptrace.h>
 
 /* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
  * register API yet */
@@ -53,9 +53,7 @@ enum arc700_linux_regnums {
 };
 
 #else
-static inline void kgdb_trap(struct pt_regs *regs, int param)
-{
-}
+#define kgdb_trap(regs, param)
 #endif
 
 #endif	/* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 8ae783d20a81..6179de7e07c2 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -123,7 +123,7 @@ static inline long regs_return_value(struct pt_regs *regs)
 #define orig_r8_IS_SCALL		0x0001
 #define orig_r8_IS_SCALL_RESTARTED	0x0002
 #define orig_r8_IS_BRKPT		0x0004
-#define orig_r8_IS_EXCPN		0x0004
+#define orig_r8_IS_EXCPN		0x0008
 #define orig_r8_IS_IRQ1			0x0010
 #define orig_r8_IS_IRQ2			0x0020
 
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index e53a5340ba4f..dd785befe7fd 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -16,8 +16,6 @@
 #include <linux/types.h>
 
 int sys_clone_wrapper(int, int, int, int, int);
-int sys_fork_wrapper(void);
-int sys_vfork_wrapper(void);
 int sys_cacheflush(uint32_t, uint32_t uint32_t);
 int sys_arc_settls(void *);
 int sys_arc_gettls(void);
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 6afa4f702075..30333cec0fef 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -28,14 +28,14 @@
 */
 struct user_regs_struct {
 
-	struct scratch {
+	struct {
 		long pad;
 		long bta, lp_start, lp_end, lp_count;
 		long status32, ret, blink, fp, gp;
 		long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
 		long sp;
 	} scratch;
-	struct callee {
+	struct {
 		long pad;
 		long r25, r24, r23, r22, r21, r20;
 		long r19, r18, r17, r16, r15, r14, r13;
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index ef6800ba2f03..91eeab81f52d 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -452,7 +452,7 @@ tracesys:
 	; using ERET won't work since next-PC has already committed
 	lr  r12, [efa]
 	GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r11
-	st  r12, [r11, THREAD_FAULT_ADDR]
+	st  r12, [r11, THREAD_FAULT_ADDR]	; thread.fault_address
 
 	; PRE Sys Call Ptrace hook
 	mov r0, sp	; pt_regs needed
@@ -792,31 +792,6 @@ ARC_EXIT ret_from_fork
 
 ;################### Special Sys Call Wrappers ##########################
 
-; TBD: call do_fork directly from here
-ARC_ENTRY sys_fork_wrapper
-	SAVE_CALLEE_SAVED_USER
-	bl  @sys_fork
-	DISCARD_CALLEE_SAVED_USER
-
-	GET_CURR_THR_INFO_FLAGS   r10
-	btst r10, TIF_SYSCALL_TRACE
-	bnz  tracesys_exit
-
-	b ret_from_system_call
-ARC_EXIT sys_fork_wrapper
-
-ARC_ENTRY sys_vfork_wrapper
-	SAVE_CALLEE_SAVED_USER
-	bl  @sys_vfork
-	DISCARD_CALLEE_SAVED_USER
-
-	GET_CURR_THR_INFO_FLAGS   r10
-	btst r10, TIF_SYSCALL_TRACE
-	bnz  tracesys_exit
-
-	b ret_from_system_call
-ARC_EXIT sys_vfork_wrapper
-
 ARC_ENTRY sys_clone_wrapper
 	SAVE_CALLEE_SAVED_USER
 	bl  @sys_clone
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
index 2888ba5be47e..52bdc83c1495 100644
--- a/arch/arc/kernel/kgdb.c
+++ b/arch/arc/kernel/kgdb.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/kgdb.h>
+#include <linux/sched.h>
 #include <asm/disasm.h>
 #include <asm/cacheflush.h>
 
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index dc0f968dae0a..2d95ac07df7b 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -232,10 +232,8 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 
 	n += scnprintf(buf + n, len - n, "\n");
 
-#ifdef _ASM_GENERIC_UNISTD_H
 	n += scnprintf(buf + n, len - n,
-		       "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
-#endif
+		       "OS ABI [v3]\t: no-legacy-syscalls\n");
 
 	return buf;
 }
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c
index f6bdd07583f3..9d6c1ca26af6 100644
--- a/arch/arc/kernel/sys.c
+++ b/arch/arc/kernel/sys.c
@@ -6,8 +6,6 @@
 #include <asm/syscalls.h>
 
 #define sys_clone	sys_clone_wrapper
-#define sys_fork	sys_fork_wrapper
-#define sys_vfork	sys_vfork_wrapper
 
 #undef __SYSCALL
 #define __SYSCALL(nr, call) [nr] = (call),
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 13b739469c51..1cacda426a0e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1183,9 +1183,9 @@ config ARM_NR_BANKS
 	default 8
 
 config IWMMXT
-	bool "Enable iWMMXt support"
+	bool "Enable iWMMXt support" if !CPU_PJ4
 	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
-	default y if PXA27x || PXA3xx || ARCH_MMP
+	default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4
 	help
 	  Enable support for iWMMXt context switching at run time if
 	  running on a CPU that supports it.
@@ -1439,6 +1439,16 @@ config ARM_ERRATA_775420
 	  to deadlock. This workaround puts DSB before executing ISB if
 	  an abort may occur on cache maintenance.
 
+config ARM_ERRATA_798181
+	bool "ARM errata: TLBI/DSB failure on Cortex-A15"
+	depends on CPU_V7 && SMP
+	help
+	  On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
+	  adequately shooting down all use of the old entries. This
+	  option enables the Linux kernel workaround for this erratum
+	  which sends an IPI to the CPUs that are running the same ASID
+	  as the one being invalidated.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index ecfcdba2d17c..9b31f4311ea2 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -495,6 +495,7 @@ config DEBUG_IMX_UART_PORT
 		    DEBUG_IMX53_UART || \
 		    DEBUG_IMX6Q_UART
 	default 1
+	depends on ARCH_MXC
 	help
 	  Choose UART port on which kernel low-level debug messages
 	  should be output.
diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts
index dd0c57dd9f30..3234875824dc 100644
--- a/arch/arm/boot/dts/armada-370-mirabox.dts
+++ b/arch/arm/boot/dts/armada-370-mirabox.dts
@@ -54,7 +54,7 @@
54 }; 54 };
55 55
56 mvsdio@d00d4000 { 56 mvsdio@d00d4000 {
57 pinctrl-0 = <&sdio_pins2>; 57 pinctrl-0 = <&sdio_pins3>;
58 pinctrl-names = "default"; 58 pinctrl-names = "default";
59 status = "okay"; 59 status = "okay";
60 /* 60 /*
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index 8188d138020e..a195debb67d3 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -59,6 +59,12 @@
59 "mpp50", "mpp51", "mpp52"; 59 "mpp50", "mpp51", "mpp52";
60 marvell,function = "sd0"; 60 marvell,function = "sd0";
61 }; 61 };
62
63 sdio_pins3: sdio-pins3 {
64 marvell,pins = "mpp48", "mpp49", "mpp50",
65 "mpp51", "mpp52", "mpp53";
66 marvell,function = "sd0";
67 };
62 }; 68 };
63 69
64 gpio0: gpio@d0018100 { 70 gpio0: gpio@d0018100 {
diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi
index 9de93096601a..aaa63d0a8096 100644
--- a/arch/arm/boot/dts/dbx5x0.dtsi
+++ b/arch/arm/boot/dts/dbx5x0.dtsi
@@ -191,8 +191,8 @@
191 191
192 prcmu: prcmu@80157000 { 192 prcmu: prcmu@80157000 {
193 compatible = "stericsson,db8500-prcmu"; 193 compatible = "stericsson,db8500-prcmu";
194 reg = <0x80157000 0x1000>; 194 reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>;
195 reg-names = "prcmu"; 195 reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm";
196 interrupts = <0 47 0x4>; 196 interrupts = <0 47 0x4>;
197 #address-cells = <1>; 197 #address-cells = <1>;
198 #size-cells = <1>; 198 #size-cells = <1>;
diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts
index 6ce3d17c3a29..fd36e1cca104 100644
--- a/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/arch/arm/boot/dts/imx28-m28evk.dts
@@ -152,7 +152,6 @@
152 i2c0: i2c@80058000 { 152 i2c0: i2c@80058000 {
153 pinctrl-names = "default"; 153 pinctrl-names = "default";
154 pinctrl-0 = <&i2c0_pins_a>; 154 pinctrl-0 = <&i2c0_pins_a>;
155 clock-frequency = <400000>;
156 status = "okay"; 155 status = "okay";
157 156
158 sgtl5000: codec@0a { 157 sgtl5000: codec@0a {
diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts
index e6cde8aa7fff..6c6a5442800a 100644
--- a/arch/arm/boot/dts/imx28-sps1.dts
+++ b/arch/arm/boot/dts/imx28-sps1.dts
@@ -70,7 +70,6 @@
70 i2c0: i2c@80058000 { 70 i2c0: i2c@80058000 {
71 pinctrl-names = "default"; 71 pinctrl-names = "default";
72 pinctrl-0 = <&i2c0_pins_a>; 72 pinctrl-0 = <&i2c0_pins_a>;
73 clock-frequency = <400000>;
74 status = "okay"; 73 status = "okay";
75 74
76 rtc: rtc@51 { 75 rtc: rtc@51 {
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 06ec460b4581..281a223591ff 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -91,6 +91,7 @@
91 compatible = "arm,cortex-a9-twd-timer"; 91 compatible = "arm,cortex-a9-twd-timer";
92 reg = <0x00a00600 0x20>; 92 reg = <0x00a00600 0x20>;
93 interrupts = <1 13 0xf01>; 93 interrupts = <1 13 0xf01>;
94 clocks = <&clks 15>;
94 }; 95 };
95 96
96 L2: l2-cache@00a02000 { 97 L2: l2-cache@00a02000 {
diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts
index bd83b8fc7c83..c3573be7b92c 100644
--- a/arch/arm/boot/dts/kirkwood-goflexnet.dts
+++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts
@@ -77,6 +77,7 @@
77 }; 77 };
78 78
79 nand@3000000 { 79 nand@3000000 {
80 chip-delay = <40>;
80 status = "okay"; 81 status = "okay";
81 82
82 partition@0 { 83 partition@0 {
diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
index 93c3afbef9ee..3694e94f6e99 100644
--- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
+++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
@@ -96,11 +96,11 @@
96 marvell,function = "gpio"; 96 marvell,function = "gpio";
97 }; 97 };
98 pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 { 98 pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 {
99 marvell,pins = "mpp44"; 99 marvell,pins = "mpp46";
100 marvell,function = "gpio"; 100 marvell,function = "gpio";
101 }; 101 };
102 pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 { 102 pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 {
103 marvell,pins = "mpp45"; 103 marvell,pins = "mpp47";
104 marvell,function = "gpio"; 104 marvell,function = "gpio";
105 }; 105 };
106 106
@@ -157,14 +157,14 @@
157 gpios = <&gpio0 16 0>; 157 gpios = <&gpio0 16 0>;
158 linux,default-trigger = "default-on"; 158 linux,default-trigger = "default-on";
159 }; 159 };
160 health_led1 { 160 rebuild_led {
161 label = "status:white:rebuild_led";
162 gpios = <&gpio1 4 0>;
163 };
164 health_led {
161 label = "status:red:health_led"; 165 label = "status:red:health_led";
162 gpios = <&gpio1 5 0>; 166 gpios = <&gpio1 5 0>;
163 }; 167 };
164 health_led2 {
165 label = "status:white:health_led";
166 gpios = <&gpio1 4 0>;
167 };
168 backup_led { 168 backup_led {
169 label = "status:blue:backup_led"; 169 label = "status:blue:backup_led";
170 gpios = <&gpio0 15 0>; 170 gpios = <&gpio0 15 0>;
diff --git a/arch/arm/boot/dts/orion5x.dtsi b/arch/arm/boot/dts/orion5x.dtsi
index 8aad00f81ed9..f7bec3b1ba32 100644
--- a/arch/arm/boot/dts/orion5x.dtsi
+++ b/arch/arm/boot/dts/orion5x.dtsi
@@ -13,6 +13,9 @@
13 compatible = "marvell,orion5x"; 13 compatible = "marvell,orion5x";
14 interrupt-parent = <&intc>; 14 interrupt-parent = <&intc>;
15 15
16 aliases {
17 gpio0 = &gpio0;
18 };
16 intc: interrupt-controller { 19 intc: interrupt-controller {
17 compatible = "marvell,orion-intc", "marvell,intc"; 20 compatible = "marvell,orion-intc", "marvell,intc";
18 interrupt-controller; 21 interrupt-controller;
@@ -32,7 +35,9 @@
32 #gpio-cells = <2>; 35 #gpio-cells = <2>;
33 gpio-controller; 36 gpio-controller;
34 reg = <0x10100 0x40>; 37 reg = <0x10100 0x40>;
35 ngpio = <32>; 38 ngpios = <32>;
39 interrupt-controller;
40 #interrupt-cells = <2>;
36 interrupts = <6>, <7>, <8>, <9>; 41 interrupts = <6>, <7>, <8>, <9>;
37 }; 42 };
38 43
@@ -91,7 +96,7 @@
91 reg = <0x90000 0x10000>, 96 reg = <0x90000 0x10000>,
92 <0xf2200000 0x800>; 97 <0xf2200000 0x800>;
93 reg-names = "regs", "sram"; 98 reg-names = "regs", "sram";
94 interrupts = <22>; 99 interrupts = <28>;
95 status = "okay"; 100 status = "okay";
96 }; 101 };
97 }; 102 };
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 48d00a099ce3..3d3f64d2111a 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -385,7 +385,7 @@
 
         spi@7000d800 {
                 compatible = "nvidia,tegra20-slink";
-                reg = <0x7000d480 0x200>;
+                reg = <0x7000d800 0x200>;
                 interrupts = <0 83 0x04>;
                 nvidia,dma-request-selector = <&apbdma 17>;
                 #address-cells = <1>;
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 9d87a3ffe998..dbf46c272562 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -372,7 +372,7 @@
 
         spi@7000d800 {
                 compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
-                reg = <0x7000d480 0x200>;
+                reg = <0x7000d800 0x200>;
                 interrupts = <0 83 0x04>;
                 nvidia,dma-request-selector = <&apbdma 17>;
                 #address-cells = <1>;
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index 720799fd3a81..dff714d886d5 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -24,7 +24,7 @@ extern struct arm_delay_ops {
         void (*delay)(unsigned long);
         void (*const_udelay)(unsigned long);
         void (*udelay)(unsigned long);
-        bool const_clock;
+        unsigned long ticks_per_jiffy;
 } arm_delay_ops;
 
 #define __delay(n)        arm_delay_ops.delay(n)
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 8c5e828f484d..91b99abe7a95 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page);
 #endif
 #endif
 
+/*
+ * Needed to be able to broadcast the TLB invalidation for kmap.
+ */
+#ifdef CONFIG_ARM_ERRATA_798181
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#endif
+
 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
 extern void *kmap_high_get(struct page *page);
 #else
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 863a6611323c..a7b85e0d0cc1 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)        ({ atomic64_set(&mm->context.id, 0); 0; })
 
+DECLARE_PER_CPU(atomic64_t, active_asids);
+
 #else   /* !CONFIG_CPU_HAS_ASID */
 
 #ifdef CONFIG_MMU
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 4db8c8820f0d..9e9c041358ca 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -450,6 +450,21 @@ static inline void local_flush_bp_all(void)
         isb();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+        /*
+         * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
+         */
+        asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+        dsb();
+}
+#else
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+}
+#endif
+
 /*
  * flush_pmd_entry
  *
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 3248cde504ed..fefd7f971437 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old)
276 */ 276 */
277 277
278.macro mcount_enter 278.macro mcount_enter
279/*
280 * This pad compensates for the push {lr} at the call site. Note that we are
281 * unable to unwind through a function which does not otherwise save its lr.
282 */
283 UNWIND(.pad #4)
279 stmdb sp!, {r0-r3, lr} 284 stmdb sp!, {r0-r3, lr}
285 UNWIND(.save {r0-r3, lr})
280.endm 286.endm
281 287
282.macro mcount_get_lr reg 288.macro mcount_get_lr reg
@@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old)
289.endm 295.endm
290 296
291ENTRY(__gnu_mcount_nc) 297ENTRY(__gnu_mcount_nc)
298UNWIND(.fnstart)
292#ifdef CONFIG_DYNAMIC_FTRACE 299#ifdef CONFIG_DYNAMIC_FTRACE
293 mov ip, lr 300 mov ip, lr
294 ldmia sp!, {lr} 301 ldmia sp!, {lr}
@@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc)
296#else 303#else
297 __mcount 304 __mcount
298#endif 305#endif
306UNWIND(.fnend)
299ENDPROC(__gnu_mcount_nc) 307ENDPROC(__gnu_mcount_nc)
300 308
301#ifdef CONFIG_DYNAMIC_FTRACE 309#ifdef CONFIG_DYNAMIC_FTRACE
302ENTRY(ftrace_caller) 310ENTRY(ftrace_caller)
311UNWIND(.fnstart)
303 __ftrace_caller 312 __ftrace_caller
313UNWIND(.fnend)
304ENDPROC(ftrace_caller) 314ENDPROC(ftrace_caller)
305#endif 315#endif
306 316
307#ifdef CONFIG_FUNCTION_GRAPH_TRACER 317#ifdef CONFIG_FUNCTION_GRAPH_TRACER
308ENTRY(ftrace_graph_caller) 318ENTRY(ftrace_graph_caller)
319UNWIND(.fnstart)
309 __ftrace_graph_caller 320 __ftrace_graph_caller
321UNWIND(.fnend)
310ENDPROC(ftrace_graph_caller) 322ENDPROC(ftrace_graph_caller)
311#endif 323#endif
312 324
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index e0eb9a1cae77..8bac553fe213 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -267,7 +267,7 @@ __create_page_tables:
         addne   r6, r6, #1 << SECTION_SHIFT
         strne   r6, [r3]
 
-#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
         sub     r4, r4, #4              @ Fixup page table pointer
                                         @ for 64-bit descriptors
 #endif
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 96093b75ab90..5dc1aa6f0f7d 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused)
         }
 
         if (err) {
-                pr_warning("CPU %d debug is powered down!\n", cpu);
+                pr_warn_once("CPU %d debug is powered down!\n", cpu);
                 cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
                 return;
         }
@@ -987,7 +987,7 @@ clear_vcr:
         isb();
 
         if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
-                pr_warning("CPU %d failed to disable vector catch\n", cpu);
+                pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
                 return;
         }
 
@@ -1007,7 +1007,7 @@ clear_vcr:
         }
 
         if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
-                pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
+                pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
                 return;
         }
 
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3f6cbb2e3eda..d343a6c3a6d1 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -353,6 +353,23 @@ void __init early_print(const char *str, ...)
         printk("%s", buf);
 }
 
+static void __init cpuid_init_hwcaps(void)
+{
+        unsigned int divide_instrs;
+
+        if (cpu_architecture() < CPU_ARCH_ARMv7)
+                return;
+
+        divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
+
+        switch (divide_instrs) {
+        case 2:
+                elf_hwcap |= HWCAP_IDIVA;
+        case 1:
+                elf_hwcap |= HWCAP_IDIVT;
+        }
+}
+
 static void __init feat_v6_fixup(void)
 {
         int id = read_cpuid_id();
@@ -483,8 +500,11 @@ static void __init setup_processor(void)
         snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                  list->elf_name, ENDIANNESS);
         elf_hwcap = list->elf_hwcap;
+
+        cpuid_init_hwcaps();
+
 #ifndef CONFIG_ARM_THUMB
-        elf_hwcap &= ~HWCAP_THUMB;
+        elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif
 
         feat_v6_fixup();
@@ -524,7 +544,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
         size -= start & ~PAGE_MASK;
         bank->start = PAGE_ALIGN(start);
 
-#ifndef CONFIG_LPAE
+#ifndef CONFIG_ARM_LPAE
         if (bank->start + size < bank->start) {
                 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
                         "32-bit physical address space\n", (long long)start);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 79078edbb9bc..1f2ccccaf009 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -673,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb,
         if (freq->flags & CPUFREQ_CONST_LOOPS)
                 return NOTIFY_OK;
 
-        if (arm_delay_ops.const_clock)
-                return NOTIFY_OK;
-
         if (!per_cpu(l_p_j_ref, cpu)) {
                 per_cpu(l_p_j_ref, cpu) =
                         per_cpu(cpu_data, cpu).loops_per_jiffy;
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index bd0300531399..e82e1d248772 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -12,6 +12,7 @@
 
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 
 /**********************************************************************/
 
@@ -69,12 +70,72 @@ static inline void ipi_flush_bp_all(void *ignored)
         local_flush_bp_all();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+static int erratum_a15_798181(void)
+{
+        unsigned int midr = read_cpuid_id();
+
+        /* Cortex-A15 r0p0..r3p2 affected */
+        if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+                return 0;
+        return 1;
+}
+#else
+static int erratum_a15_798181(void)
+{
+        return 0;
+}
+#endif
+
+static void ipi_flush_tlb_a15_erratum(void *arg)
+{
+        dmb();
+}
+
+static void broadcast_tlb_a15_erratum(void)
+{
+        if (!erratum_a15_798181())
+                return;
+
+        dummy_flush_tlb_a15_erratum();
+        smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
+                               NULL, 1);
+}
+
+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
+{
+        int cpu;
+        cpumask_t mask = { CPU_BITS_NONE };
+
+        if (!erratum_a15_798181())
+                return;
+
+        dummy_flush_tlb_a15_erratum();
+        for_each_online_cpu(cpu) {
+                if (cpu == smp_processor_id())
+                        continue;
+                /*
+                 * We only need to send an IPI if the other CPUs are running
+                 * the same ASID as the one being invalidated. There is no
+                 * need for locking around the active_asids check since the
+                 * switch_mm() function has at least one dmb() (as required by
+                 * this workaround) in case a context switch happens on
+                 * another CPU after the condition below.
+                 */
+                if (atomic64_read(&mm->context.id) ==
+                    atomic64_read(&per_cpu(active_asids, cpu)))
+                        cpumask_set_cpu(cpu, &mask);
+        }
+        smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+}
+
 void flush_tlb_all(void)
 {
         if (tlb_ops_need_broadcast())
                 on_each_cpu(ipi_flush_tlb_all, NULL, 1);
         else
                 local_flush_tlb_all();
+        broadcast_tlb_a15_erratum();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
@@ -83,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm)
                 on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
         else
                 local_flush_tlb_mm(mm);
+        broadcast_tlb_mm_a15_erratum(mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -95,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
                                         &ta, 1);
         } else
                 local_flush_tlb_page(vma, uaddr);
+        broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -105,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
                 on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
         } else
                 local_flush_tlb_kernel_page(kaddr);
+        broadcast_tlb_a15_erratum();
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
@@ -119,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
                                         &ta, 1);
         } else
                 local_flush_tlb_range(vma, start, end);
+        broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -130,6 +195,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
                 on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
         } else
                 local_flush_tlb_kernel_range(start, end);
+        broadcast_tlb_a15_erratum();
 }
 
 void flush_bp_all(void)
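
The erratum_a15_798181() check above works purely on MIDR: the mask 0xff0ffff0 clears the variant (bits [23:20]) and revision (bits [3:0]) fields so the comparison with 0x410fc0f0 matches any Cortex-A15, and the unmasked upper bound 0x413fc0f2 then rejects anything newer than r3p2. A small sketch, assuming the standard MIDR field layout (the helper below is illustrative, not part of the patch):

    /* Sketch: split an ARM MIDR value into the rNpM form used in the
     * erratum check above. Not kernel code. */
    #include <stdio.h>

    static void midr_to_rnpn(unsigned int midr)
    {
        unsigned int variant  = (midr >> 20) & 0xf;   /* "rN" */
        unsigned int revision = midr & 0xf;           /* "pM" */

        printf("MIDR %#010x -> r%up%u, affected by 798181: %s\n",
               midr, variant, revision,
               ((midr & 0xff0ffff0) == 0x410fc0f0 && midr <= 0x413fc0f2) ?
               "yes" : "no");
    }

    int main(void)
    {
        midr_to_rnpn(0x412fc0f2);   /* Cortex-A15 r2p2 -> affected */
        midr_to_rnpn(0x414fc0f0);   /* Cortex-A15 r4p0 -> not affected */
        return 0;
    }
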
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index c9a17316e9fe..0e4cfe123b38 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
                           lr, irq, vgic_cpu->vgic_lr[lr]);
                 BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
                 vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
-
-                goto out;
+                return true;
         }
 
         /* Try to use another LR for this interrupt */
@@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
         vgic_cpu->vgic_irq_lr_map[irq] = lr;
         set_bit(lr, vgic_cpu->lr_used);
 
-out:
         if (!vgic_irq_is_edge(vcpu, irq))
                 vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
 
@@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1018 1016
1019 kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); 1017 kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
1020 1018
1021 /*
1022 * We do not need to take the distributor lock here, since the only
1023 * action we perform is clearing the irq_active_bit for an EOIed
1024 * level interrupt. There is a potential race with
1025 * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
1026 * check if the interrupt is already active. Two possibilities:
1027 *
1028 * - The queuing is occurring on the same vcpu: cannot happen,
1029 * as we're already in the context of this vcpu, and
1030 * executing the handler
1031 * - The interrupt has been migrated to another vcpu, and we
1032 * ignore this interrupt for this run. Big deal. It is still
1033 * pending though, and will get considered when this vcpu
1034 * exits.
1035 */
1036 if (vgic_cpu->vgic_misr & GICH_MISR_EOI) { 1019 if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
1037 /* 1020 /*
1038 * Some level interrupts have been EOIed. Clear their 1021 * Some level interrupts have been EOIed. Clear their
@@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1054 } else { 1037 } else {
1055 vgic_cpu_irq_clear(vcpu, irq); 1038 vgic_cpu_irq_clear(vcpu, irq);
1056 } 1039 }
1040
1041 /*
1042 * Despite being EOIed, the LR may not have
1043 * been marked as empty.
1044 */
1045 set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
1046 vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
1057 } 1047 }
1058 } 1048 }
1059 1049
@@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1064} 1054}
1065 1055
1066/* 1056/*
1067 * Sync back the VGIC state after a guest run. We do not really touch 1057 * Sync back the VGIC state after a guest run. The distributor lock is
1068 * the distributor here (the irq_pending_on_cpu bit is safe to set), 1058 * needed so we don't get preempted in the middle of the state processing.
1069 * so there is no need for taking its lock.
1070 */ 1059 */
1071static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) 1060static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1072{ 1061{
@@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1112 1101
1113void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) 1102void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1114{ 1103{
1104 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1105
1115 if (!irqchip_in_kernel(vcpu->kvm)) 1106 if (!irqchip_in_kernel(vcpu->kvm))
1116 return; 1107 return;
1117 1108
1109 spin_lock(&dist->lock);
1118 __kvm_vgic_sync_hwstate(vcpu); 1110 __kvm_vgic_sync_hwstate(vcpu);
1111 spin_unlock(&dist->lock);
1119} 1112}
1120 1113
1121int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) 1114int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 6b93f6a1a3c7..64dbfa57204a 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles)
 static void __timer_const_udelay(unsigned long xloops)
 {
         unsigned long long loops = xloops;
-        loops *= loops_per_jiffy;
+        loops *= arm_delay_ops.ticks_per_jiffy;
         __timer_delay(loops >> UDELAY_SHIFT);
 }
 
@@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
73 pr_info("Switching to timer-based delay loop\n"); 73 pr_info("Switching to timer-based delay loop\n");
74 delay_timer = timer; 74 delay_timer = timer;
75 lpj_fine = timer->freq / HZ; 75 lpj_fine = timer->freq / HZ;
76 loops_per_jiffy = lpj_fine; 76
77 /* cpufreq may scale loops_per_jiffy, so keep a private copy */
78 arm_delay_ops.ticks_per_jiffy = lpj_fine;
77 arm_delay_ops.delay = __timer_delay; 79 arm_delay_ops.delay = __timer_delay;
78 arm_delay_ops.const_udelay = __timer_const_udelay; 80 arm_delay_ops.const_udelay = __timer_const_udelay;
79 arm_delay_ops.udelay = __timer_udelay; 81 arm_delay_ops.udelay = __timer_udelay;
80 arm_delay_ops.const_clock = true; 82
81 delay_calibrated = true; 83 delay_calibrated = true;
82 } else { 84 } else {
83 pr_info("Ignoring duplicate/late registration of read_current_timer delay\n"); 85 pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index e698f26cc0cb..52e4bb5cf12d 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -22,19 +22,9 @@
22 22
23static struct map_desc cns3xxx_io_desc[] __initdata = { 23static struct map_desc cns3xxx_io_desc[] __initdata = {
24 { 24 {
25 .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT, 25 .virtual = CNS3XXX_TC11MP_SCU_BASE_VIRT,
26 .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE), 26 .pfn = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE),
27 .length = SZ_4K, 27 .length = SZ_8K,
28 .type = MT_DEVICE,
29 }, {
30 .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT,
31 .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE),
32 .length = SZ_4K,
33 .type = MT_DEVICE,
34 }, {
35 .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT,
36 .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE),
37 .length = SZ_4K,
38 .type = MT_DEVICE, 28 .type = MT_DEVICE,
39 }, { 29 }, {
40 .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT, 30 .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT,
diff --git a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
index 191c8e57f289..b1021aafa481 100644
--- a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
+++ b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
@@ -94,10 +94,10 @@
94#define RTC_INTR_STS_OFFSET 0x34 94#define RTC_INTR_STS_OFFSET 0x34
95 95
96#define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */ 96#define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */
97#define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */ 97#define CNS3XXX_MISC_BASE_VIRT 0xFB000000 /* Misc Control */
98 98
99#define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */ 99#define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */
100#define CNS3XXX_PM_BASE_VIRT 0xFFF08000 100#define CNS3XXX_PM_BASE_VIRT 0xFB001000
101 101
102#define PM_CLK_GATE_OFFSET 0x00 102#define PM_CLK_GATE_OFFSET 0x00
103#define PM_SOFT_RST_OFFSET 0x04 103#define PM_SOFT_RST_OFFSET 0x04
@@ -109,7 +109,7 @@
109#define PM_PLL_HM_PD_OFFSET 0x1C 109#define PM_PLL_HM_PD_OFFSET 0x1C
110 110
111#define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */ 111#define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */
112#define CNS3XXX_UART0_BASE_VIRT 0xFFF09000 112#define CNS3XXX_UART0_BASE_VIRT 0xFB002000
113 113
114#define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */ 114#define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */
115#define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000 115#define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000
@@ -130,7 +130,7 @@
130#define CNS3XXX_I2S_BASE_VIRT 0xFFF10000 130#define CNS3XXX_I2S_BASE_VIRT 0xFFF10000
131 131
132#define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */ 132#define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */
133#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800 133#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFB003000
134 134
135#define TIMER1_COUNTER_OFFSET 0x00 135#define TIMER1_COUNTER_OFFSET 0x00
136#define TIMER1_AUTO_RELOAD_OFFSET 0x04 136#define TIMER1_AUTO_RELOAD_OFFSET 0x04
@@ -227,16 +227,16 @@
227 * Testchip peripheral and fpga gic regions 227 * Testchip peripheral and fpga gic regions
228 */ 228 */
229#define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */ 229#define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */
230#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000 230#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFB004000
231 231
232#define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */ 232#define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */
233#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100 233#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100)
234 234
235#define CNS3XXX_TC11MP_TWD_BASE 0x90000600 235#define CNS3XXX_TC11MP_TWD_BASE 0x90000600
236#define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600 236#define CNS3XXX_TC11MP_TWD_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600)
237 237
238#define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */ 238#define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */
239#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000 239#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000)
240 240
241#define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */ 241#define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */
242#define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000 242#define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000
diff --git a/arch/arm/mach-ep93xx/include/mach/uncompress.h b/arch/arm/mach-ep93xx/include/mach/uncompress.h
index d2afb4dd82ab..b5cc77d2380b 100644
--- a/arch/arm/mach-ep93xx/include/mach/uncompress.h
+++ b/arch/arm/mach-ep93xx/include/mach/uncompress.h
@@ -47,9 +47,13 @@ static void __raw_writel(unsigned int value, unsigned int ptr)
 
 static inline void putc(int c)
 {
-        /* Transmit fifo not full? */
-        while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)
-                ;
+        int i;
+
+        for (i = 0; i < 10000; i++) {
+                /* Transmit fifo not full? */
+                if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF))
+                        break;
+        }
 
         __raw_writeb(c, PHYS_UART_DATA);
 }
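
The rewritten putc() bounds the busy-wait on the TX-FIFO-full flag so the decompressor can no longer hang if the UART is not clocked or never drains; after at most 10000 polls the character is written anyway, trading possibly lost output for guaranteed progress. The same bounded-poll idiom as a generic helper (a sketch; the name and limit are arbitrary):

    /* Sketch of a bounded poll-for-bit-clear helper. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool wait_for_clear(volatile const uint32_t *reg, uint32_t mask,
                               unsigned int max_polls)
    {
        unsigned int i;

        for (i = 0; i < max_polls; i++) {
            if (!(*reg & mask))
                return true;    /* condition met: bit(s) cleared */
        }
        return false;           /* gave up; caller proceeds anyway */
    }

    int main(void)
    {
        uint32_t fake_flag_reg = 0;     /* FIFO-full bit already clear */

        printf("drained: %s\n",
               wait_for_clear(&fake_flag_reg, 1u << 5, 10000) ? "yes" : "no");
        return 0;
    }
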
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index e13a8fa5e62c..2193c834f55c 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -257,6 +257,7 @@ int __init mx35_clocks_init(void)
257 clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); 257 clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
258 clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); 258 clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
259 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); 259 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
260 clk_register_clkdev(clk[admux_gate], "audmux", NULL);
260 261
261 clk_prepare_enable(clk[spba_gate]); 262 clk_prepare_enable(clk[spba_gate]);
262 clk_prepare_enable(clk[gpio1_gate]); 263 clk_prepare_enable(clk[gpio1_gate]);
@@ -265,6 +266,7 @@ int __init mx35_clocks_init(void)
265 clk_prepare_enable(clk[iim_gate]); 266 clk_prepare_enable(clk[iim_gate]);
266 clk_prepare_enable(clk[emi_gate]); 267 clk_prepare_enable(clk[emi_gate]);
267 clk_prepare_enable(clk[max_gate]); 268 clk_prepare_enable(clk[max_gate]);
269 clk_prepare_enable(clk[iomuxc_gate]);
268 270
269 /* 271 /*
270 * SCC is needed to boot via mmc after a watchdog reset. The clock code 272 * SCC is needed to boot via mmc after a watchdog reset. The clock code
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 2f9ff93a4e61..d38e54f5b6d7 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m"
115static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; 115static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
116static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; 116static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
117static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; 117static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
118static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", }; 118static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
119static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; 119static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
120static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; 120static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
121static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; 121static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
@@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void)
443 443
444 clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); 444 clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
445 clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); 445 clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
446 clk_register_clkdev(clk[twd], NULL, "smp_twd");
447 clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); 446 clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
448 clk_register_clkdev(clk[ahb], "ahb", NULL); 447 clk_register_clkdev(clk[ahb], "ahb", NULL);
449 clk_register_clkdev(clk[cko1], "cko1", NULL); 448 clk_register_clkdev(clk[cko1], "cko1", NULL);
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 5a800bfcec5b..5bf4a97ab241 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -110,6 +110,8 @@ void tzic_handle_irq(struct pt_regs *);
110 110
111extern void imx_enable_cpu(int cpu, bool enable); 111extern void imx_enable_cpu(int cpu, bool enable);
112extern void imx_set_cpu_jump(int cpu, void *jump_addr); 112extern void imx_set_cpu_jump(int cpu, void *jump_addr);
113extern u32 imx_get_cpu_arg(int cpu);
114extern void imx_set_cpu_arg(int cpu, u32 arg);
113extern void v7_cpu_resume(void); 115extern void v7_cpu_resume(void);
114extern u32 *pl310_get_save_ptr(void); 116extern u32 *pl310_get_save_ptr(void);
115#ifdef CONFIG_SMP 117#ifdef CONFIG_SMP
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 7bc5fe15dda2..361a253e2b63 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -46,11 +46,23 @@ static inline void cpu_enter_lowpower(void)
 void imx_cpu_die(unsigned int cpu)
 {
         cpu_enter_lowpower();
+        /*
+         * We use the cpu jumping argument register to sync with
+         * imx_cpu_kill() which is running on cpu0 and waiting for
+         * the register being cleared to kill the cpu.
+         */
+        imx_set_cpu_arg(cpu, ~0);
         cpu_do_idle();
 }
 
 int imx_cpu_kill(unsigned int cpu)
 {
+        unsigned long timeout = jiffies + msecs_to_jiffies(50);
+
+        while (imx_get_cpu_arg(cpu) == 0)
+                if (time_after(jiffies, timeout))
+                        return 0;
         imx_enable_cpu(cpu, false);
+        imx_set_cpu_arg(cpu, 0);
         return 1;
 }
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c
index e15f1555c59b..09a742f8c7ab 100644
--- a/arch/arm/mach-imx/src.c
+++ b/arch/arm/mach-imx/src.c
@@ -43,6 +43,18 @@ void imx_set_cpu_jump(int cpu, void *jump_addr)
                        src_base + SRC_GPR1 + cpu * 8);
 }
 
+u32 imx_get_cpu_arg(int cpu)
+{
+        cpu = cpu_logical_map(cpu);
+        return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4);
+}
+
+void imx_set_cpu_arg(int cpu, u32 arg)
+{
+        cpu = cpu_logical_map(cpu);
+        writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
+}
+
 void imx_src_prepare_restart(void)
 {
         u32 val;
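
Together with the hotplug.c hunk above, this gives each secondary CPU a scratch word next to its resume address in the SRC block: SRC_GPR1 + cpu*8 holds the jump address and the following word (+4) acts as a completion flag, so the dying CPU writes ~0 there and imx_cpu_kill() on cpu0 polls it (with a 50 ms timeout) before gating the core and clearing the flag. A sketch of the same handshake pattern using C11 atomics (names are illustrative, not i.MX API):

    /* Sketch of the die/kill handshake from hotplug.c/src.c above,
     * modeled with C11 atomics instead of the SRC scratch register. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint32_t cpu_arg[4];  /* stand-in for SRC_GPR1 + cpu*8 + 4 */

    /* what the dying CPU does before parking itself (cf. imx_cpu_die) */
    void sketch_cpu_die_mark(int cpu)
    {
        atomic_store(&cpu_arg[cpu], ~0u);   /* "parked, safe to power off" */
    }

    /* what the surviving CPU does (cf. imx_cpu_kill) */
    bool sketch_cpu_kill(int cpu, unsigned int max_polls)
    {
        unsigned int i;

        for (i = 0; atomic_load(&cpu_arg[cpu]) == 0; i++)
            if (i >= max_polls)
                return false;               /* timed out, like the 50 ms case */
        /* power-gate the CPU here, then reset the flag for the next hotplug */
        atomic_store(&cpu_arg[cpu], 0);
        return true;
    }

    int main(void)
    {
        sketch_cpu_die_mark(1);
        printf("kill cpu1: %s\n", sketch_cpu_kill(1, 1000) ? "ok" : "timed out");
        return 0;
    }
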
diff --git a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
index f655b2637b0e..e5f70415905a 100644
--- a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
+++ b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
@@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = {
20 .duplex = DUPLEX_FULL, 20 .duplex = DUPLEX_FULL,
21}; 21};
22 22
23static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = {
24 .phy_addr = MV643XX_ETH_PHY_ADDR(11),
25};
26
23void __init iomega_ix2_200_init(void) 27void __init iomega_ix2_200_init(void)
24{ 28{
25 /* 29 /*
26 * Basic setup. Needs to be called early. 30 * Basic setup. Needs to be called early.
27 */ 31 */
28 kirkwood_ge01_init(&iomega_ix2_200_ge00_data); 32 kirkwood_ge00_init(&iomega_ix2_200_ge00_data);
33 kirkwood_ge01_init(&iomega_ix2_200_ge01_data);
29} 34}
diff --git a/arch/arm/mach-kirkwood/guruplug-setup.c b/arch/arm/mach-kirkwood/guruplug-setup.c
index 1c6e736cbbf8..08dd739aa709 100644
--- a/arch/arm/mach-kirkwood/guruplug-setup.c
+++ b/arch/arm/mach-kirkwood/guruplug-setup.c
@@ -53,6 +53,8 @@ static struct mv_sata_platform_data guruplug_sata_data = {
 
 static struct mvsdio_platform_data guruplug_mvsdio_data = {
         /* unfortunately the CD signal has not been connected */
+        .gpio_card_detect = -1,
+        .gpio_write_protect = -1,
 };
 
 static struct gpio_led guruplug_led_pins[] = {
diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c
index 8ddd69fdc937..6a6eb548307d 100644
--- a/arch/arm/mach-kirkwood/openrd-setup.c
+++ b/arch/arm/mach-kirkwood/openrd-setup.c
@@ -55,6 +55,7 @@ static struct mv_sata_platform_data openrd_sata_data = {
 
 static struct mvsdio_platform_data openrd_mvsdio_data = {
         .gpio_card_detect = 29, /* MPP29 used as SD card detect */
+        .gpio_write_protect = -1,
 };
 
 static unsigned int openrd_mpp_config[] __initdata = {
diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c
index c7d93b48926b..d24223166e06 100644
--- a/arch/arm/mach-kirkwood/rd88f6281-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c
@@ -69,6 +69,7 @@ static struct mv_sata_platform_data rd88f6281_sata_data = {
 
 static struct mvsdio_platform_data rd88f6281_mvsdio_data = {
         .gpio_card_detect = 28,
+        .gpio_write_protect = -1,
 };
 
 static unsigned int rd88f6281_mpp_config[] __initdata = {
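
The three mvsdio platform-data hunks above (guruplug, openrd, rd88f6281) now spell out -1 for every GPIO line that is not wired up, presumably because a zero-initialized field would otherwise be taken as GPIO 0, which is a perfectly valid GPIO number. A tiny sketch of the distinction (the validity limit below is made up for the example):

    /* Sketch: why "unused GPIO" must be a negative number, not 0. */
    #include <stdbool.h>
    #include <stdio.h>

    /* simplified stand-in for a gpio validity check */
    static bool sketch_gpio_is_valid(int number)
    {
        return number >= 0 && number < 256;   /* 256: illustrative limit */
    }

    int main(void)
    {
        printf("gpio 0:  %s\n", sketch_gpio_is_valid(0) ? "valid" : "invalid");
        printf("gpio -1: %s\n", sketch_gpio_is_valid(-1) ? "valid" : "invalid");
        return 0;
    }
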
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 2969027f02fa..f9fd77e8f1f5 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -62,7 +62,10 @@ static int msm_timer_set_next_event(unsigned long cycles,
 {
         u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
 
-        writel_relaxed(0, event_base + TIMER_CLEAR);
+        ctrl &= ~TIMER_ENABLE_EN;
+        writel_relaxed(ctrl, event_base + TIMER_ENABLE);
+
+        writel_relaxed(ctrl, event_base + TIMER_CLEAR);
         writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
         writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
         return 0;
diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c
index 274ff58271de..d5970f5a1e8d 100644
--- a/arch/arm/mach-mvebu/irq-armada-370-xp.c
+++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c
@@ -44,6 +44,8 @@
44 44
45#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) 45#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
46 46
47#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5)
48
47#define ACTIVE_DOORBELLS (8) 49#define ACTIVE_DOORBELLS (8)
48 50
49static DEFINE_RAW_SPINLOCK(irq_controller_lock); 51static DEFINE_RAW_SPINLOCK(irq_controller_lock);
@@ -59,36 +61,26 @@ static struct irq_domain *armada_370_xp_mpic_domain;
59 */ 61 */
60static void armada_370_xp_irq_mask(struct irq_data *d) 62static void armada_370_xp_irq_mask(struct irq_data *d)
61{ 63{
62#ifdef CONFIG_SMP
63 irq_hw_number_t hwirq = irqd_to_hwirq(d); 64 irq_hw_number_t hwirq = irqd_to_hwirq(d);
64 65
65 if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) 66 if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
66 writel(hwirq, main_int_base + 67 writel(hwirq, main_int_base +
67 ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); 68 ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
68 else 69 else
69 writel(hwirq, per_cpu_int_base + 70 writel(hwirq, per_cpu_int_base +
70 ARMADA_370_XP_INT_SET_MASK_OFFS); 71 ARMADA_370_XP_INT_SET_MASK_OFFS);
71#else
72 writel(irqd_to_hwirq(d),
73 per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
74#endif
75} 72}
76 73
77static void armada_370_xp_irq_unmask(struct irq_data *d) 74static void armada_370_xp_irq_unmask(struct irq_data *d)
78{ 75{
79#ifdef CONFIG_SMP
80 irq_hw_number_t hwirq = irqd_to_hwirq(d); 76 irq_hw_number_t hwirq = irqd_to_hwirq(d);
81 77
82 if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) 78 if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
83 writel(hwirq, main_int_base + 79 writel(hwirq, main_int_base +
84 ARMADA_370_XP_INT_SET_ENABLE_OFFS); 80 ARMADA_370_XP_INT_SET_ENABLE_OFFS);
85 else 81 else
86 writel(hwirq, per_cpu_int_base + 82 writel(hwirq, per_cpu_int_base +
87 ARMADA_370_XP_INT_CLEAR_MASK_OFFS); 83 ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
88#else
89 writel(irqd_to_hwirq(d),
90 per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
91#endif
92} 84}
93 85
94#ifdef CONFIG_SMP 86#ifdef CONFIG_SMP
@@ -144,10 +136,14 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
144 unsigned int virq, irq_hw_number_t hw) 136 unsigned int virq, irq_hw_number_t hw)
145{ 137{
146 armada_370_xp_irq_mask(irq_get_irq_data(virq)); 138 armada_370_xp_irq_mask(irq_get_irq_data(virq));
147 writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); 139 if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
140 writel(hw, per_cpu_int_base +
141 ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
142 else
143 writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
148 irq_set_status_flags(virq, IRQ_LEVEL); 144 irq_set_status_flags(virq, IRQ_LEVEL);
149 145
150 if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) { 146 if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
151 irq_set_percpu_devid(virq); 147 irq_set_percpu_devid(virq);
152 irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, 148 irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
153 handle_percpu_devid_irq); 149 handle_percpu_devid_irq);
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 3218f1f2c0e0..e7b781d3788f 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = {
41 .lower_margin = 4, 41 .lower_margin = 4,
42 .hsync_len = 1, 42 .hsync_len = 1,
43 .vsync_len = 1, 43 .vsync_len = 1,
44 .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
45 FB_SYNC_DOTCLK_FAILING_ACT,
46 }, 44 },
47}; 45};
48 46
@@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = {
59 .lower_margin = 10, 57 .lower_margin = 10,
60 .hsync_len = 10, 58 .hsync_len = 10,
61 .vsync_len = 10, 59 .vsync_len = 10,
62 .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
63 FB_SYNC_DOTCLK_FAILING_ACT,
64 }, 60 },
65}; 61};
66 62
@@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = {
77 .lower_margin = 45, 73 .lower_margin = 45,
78 .hsync_len = 1, 74 .hsync_len = 1,
79 .vsync_len = 1, 75 .vsync_len = 1,
80 .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT,
81 }, 76 },
82}; 77};
83 78
@@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = {
94 .lower_margin = 13, 89 .lower_margin = 13,
95 .hsync_len = 48, 90 .hsync_len = 48,
96 .vsync_len = 3, 91 .vsync_len = 3,
97 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT | 92 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
98 FB_SYNC_DATA_ENABLE_HIGH_ACT |
99 FB_SYNC_DOTCLK_FAILING_ACT,
100 }, 93 },
101}; 94};
102 95
@@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = {
113 .lower_margin = 0x15, 106 .lower_margin = 0x15,
114 .hsync_len = 64, 107 .hsync_len = 64,
115 .vsync_len = 4, 108 .vsync_len = 4,
116 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT | 109 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
117 FB_SYNC_DATA_ENABLE_HIGH_ACT |
118 FB_SYNC_DOTCLK_FAILING_ACT,
119 }, 110 },
120}; 111};
121 112
@@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = {
132 .lower_margin = 2, 123 .lower_margin = 2,
133 .hsync_len = 15, 124 .hsync_len = 15,
134 .vsync_len = 15, 125 .vsync_len = 15,
135 .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT
136 }, 126 },
137}; 127};
138 128
@@ -259,6 +249,8 @@ static void __init imx23_evk_init(void)
259 mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes); 249 mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes);
260 mxsfb_pdata.default_bpp = 32; 250 mxsfb_pdata.default_bpp = 32;
261 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; 251 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
252 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
253 MXSFB_SYNC_DOTCLK_FAILING_ACT;
262} 254}
263 255
264static inline void enable_clk_enet_out(void) 256static inline void enable_clk_enet_out(void)
@@ -278,6 +270,8 @@ static void __init imx28_evk_init(void)
278 mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes); 270 mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes);
279 mxsfb_pdata.default_bpp = 32; 271 mxsfb_pdata.default_bpp = 32;
280 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; 272 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
273 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
274 MXSFB_SYNC_DOTCLK_FAILING_ACT;
281 275
282 mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0); 276 mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0);
283} 277}
@@ -297,6 +291,7 @@ static void __init m28evk_init(void)
297 mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes); 291 mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes);
298 mxsfb_pdata.default_bpp = 16; 292 mxsfb_pdata.default_bpp = 16;
299 mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; 293 mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
294 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
300} 295}
301 296
302static void __init sc_sps1_init(void) 297static void __init sc_sps1_init(void)
@@ -322,6 +317,8 @@ static void __init apx4devkit_init(void)
322 mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes); 317 mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes);
323 mxsfb_pdata.default_bpp = 32; 318 mxsfb_pdata.default_bpp = 32;
324 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; 319 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
320 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
321 MXSFB_SYNC_DOTCLK_FAILING_ACT;
325} 322}
326 323
327#define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0) 324#define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0)
@@ -407,6 +404,7 @@ static void __init cfa10049_init(void)
407 mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes); 404 mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
408 mxsfb_pdata.default_bpp = 32; 405 mxsfb_pdata.default_bpp = 32;
409 mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; 406 mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
407 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
410} 408}
411 409
412static void __init cfa10037_init(void) 410static void __init cfa10037_init(void)
@@ -423,6 +421,8 @@ static void __init apf28_init(void)
423 mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes); 421 mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes);
424 mxsfb_pdata.default_bpp = 16; 422 mxsfb_pdata.default_bpp = 16;
425 mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT; 423 mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT;
424 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
425 MXSFB_SYNC_DOTCLK_FAILING_ACT;
426} 426}
427 427
428static void __init mxs_machine_init(void) 428static void __init mxs_machine_init(void)
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
index cb7c6ae2e3fc..6c4f766365a2 100644
--- a/arch/arm/mach-omap1/clock_data.c
+++ b/arch/arm/mach-omap1/clock_data.c
@@ -543,15 +543,6 @@ static struct clk usb_dc_ck = {
543 /* Direct from ULPD, no parent */ 543 /* Direct from ULPD, no parent */
544 .rate = 48000000, 544 .rate = 48000000,
545 .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), 545 .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
546 .enable_bit = USB_REQ_EN_SHIFT,
547};
548
549static struct clk usb_dc_ck7xx = {
550 .name = "usb_dc_ck",
551 .ops = &clkops_generic,
552 /* Direct from ULPD, no parent */
553 .rate = 48000000,
554 .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
555 .enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT, 546 .enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT,
556}; 547};
557 548
@@ -727,8 +718,7 @@ static struct omap_clk omap_clks[] = {
727 CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310), 718 CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310),
728 CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310), 719 CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310),
729 CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX), 720 CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX),
730 CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX), 721 CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX | CK_7XX),
731 CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX),
732 CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310), 722 CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310),
733 CLK(NULL, "mclk", &mclk_16xx, CK_16XX), 723 CLK(NULL, "mclk", &mclk_16xx, CK_16XX),
734 CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310), 724 CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310),
diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c
index 3d58f335f173..0c6834ae1fc4 100644
--- a/arch/arm/mach-omap2/cclock44xx_data.c
+++ b/arch/arm/mach-omap2/cclock44xx_data.c
@@ -52,6 +52,13 @@
52 */ 52 */
53#define OMAP4_DPLL_ABE_DEFFREQ 98304000 53#define OMAP4_DPLL_ABE_DEFFREQ 98304000
54 54
55/*
56 * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section
57 * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred
58 * locked frequency for the USB DPLL is 960MHz.
59 */
60#define OMAP4_DPLL_USB_DEFFREQ 960000000
61
55/* Root clocks */ 62/* Root clocks */
56 63
57DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0); 64DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0);
@@ -1011,6 +1018,10 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel,
1011 OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK, 1018 OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK,
1012 hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops); 1019 hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops);
1013 1020
1021DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0,
1022 OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
1023 OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL);
1024
1014DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0, 1025DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0,
1015 OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL, 1026 OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
1016 OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); 1027 OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
@@ -1538,6 +1549,7 @@ static struct omap_clk omap44xx_clks[] = {
1538 CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X), 1549 CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X),
1539 CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X), 1550 CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X),
1540 CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X), 1551 CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X),
1552 CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X),
1541 CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X), 1553 CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X),
1542 CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X), 1554 CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X),
1543 CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X), 1555 CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X),
@@ -1705,5 +1717,13 @@ int __init omap4xxx_clk_init(void)
1705 if (rc) 1717 if (rc)
1706 pr_err("%s: failed to configure ABE DPLL!\n", __func__); 1718 pr_err("%s: failed to configure ABE DPLL!\n", __func__);
1707 1719
1720 /*
1721 * Lock USB DPLL on OMAP4 devices so that the L3INIT power
1722 * domain can transition to retention state when not in use.
1723 */
1724 rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ);
1725 if (rc)
1726 pr_err("%s: failed to configure USB DPLL!\n", __func__);
1727
1708 return 0; 1728 return 0;
1709} 1729}
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 40f4a03d728f..d6ba13e1c540 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -293,5 +293,8 @@ extern void omap_reserve(void);
293struct omap_hwmod; 293struct omap_hwmod;
294extern int omap_dss_reset(struct omap_hwmod *); 294extern int omap_dss_reset(struct omap_hwmod *);
295 295
296/* SoC specific clock initializer */
297extern int (*omap_clk_init)(void);
298
296#endif /* __ASSEMBLER__ */ 299#endif /* __ASSEMBLER__ */
297#endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */ 300#endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 2c3fdd65387b..5c445ca1e271 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -55,6 +55,12 @@
55#include "prm44xx.h" 55#include "prm44xx.h"
56 56
57/* 57/*
58 * omap_clk_init: points to a function that does the SoC-specific
59 * clock initializations
60 */
61int (*omap_clk_init)(void);
62
63/*
58 * The machine specific code may provide the extra mapping besides the 64 * The machine specific code may provide the extra mapping besides the
59 * default mapping provided here. 65 * default mapping provided here.
60 */ 66 */
@@ -397,7 +403,7 @@ void __init omap2420_init_early(void)
397 omap242x_clockdomains_init(); 403 omap242x_clockdomains_init();
398 omap2420_hwmod_init(); 404 omap2420_hwmod_init();
399 omap_hwmod_init_postsetup(); 405 omap_hwmod_init_postsetup();
400 omap2420_clk_init(); 406 omap_clk_init = omap2420_clk_init;
401} 407}
402 408
403void __init omap2420_init_late(void) 409void __init omap2420_init_late(void)
@@ -427,7 +433,7 @@ void __init omap2430_init_early(void)
427 omap243x_clockdomains_init(); 433 omap243x_clockdomains_init();
428 omap2430_hwmod_init(); 434 omap2430_hwmod_init();
429 omap_hwmod_init_postsetup(); 435 omap_hwmod_init_postsetup();
430 omap2430_clk_init(); 436 omap_clk_init = omap2430_clk_init;
431} 437}
432 438
433void __init omap2430_init_late(void) 439void __init omap2430_init_late(void)
@@ -462,7 +468,7 @@ void __init omap3_init_early(void)
462 omap3xxx_clockdomains_init(); 468 omap3xxx_clockdomains_init();
463 omap3xxx_hwmod_init(); 469 omap3xxx_hwmod_init();
464 omap_hwmod_init_postsetup(); 470 omap_hwmod_init_postsetup();
465 omap3xxx_clk_init(); 471 omap_clk_init = omap3xxx_clk_init;
466} 472}
467 473
468void __init omap3430_init_early(void) 474void __init omap3430_init_early(void)
@@ -500,7 +506,7 @@ void __init ti81xx_init_early(void)
500 omap3xxx_clockdomains_init(); 506 omap3xxx_clockdomains_init();
501 omap3xxx_hwmod_init(); 507 omap3xxx_hwmod_init();
502 omap_hwmod_init_postsetup(); 508 omap_hwmod_init_postsetup();
503 omap3xxx_clk_init(); 509 omap_clk_init = omap3xxx_clk_init;
504} 510}
505 511
506void __init omap3_init_late(void) 512void __init omap3_init_late(void)
@@ -568,7 +574,7 @@ void __init am33xx_init_early(void)
568 am33xx_clockdomains_init(); 574 am33xx_clockdomains_init();
569 am33xx_hwmod_init(); 575 am33xx_hwmod_init();
570 omap_hwmod_init_postsetup(); 576 omap_hwmod_init_postsetup();
571 am33xx_clk_init(); 577 omap_clk_init = am33xx_clk_init;
572} 578}
573#endif 579#endif
574 580
@@ -593,7 +599,7 @@ void __init omap4430_init_early(void)
593 omap44xx_clockdomains_init(); 599 omap44xx_clockdomains_init();
594 omap44xx_hwmod_init(); 600 omap44xx_hwmod_init();
595 omap_hwmod_init_postsetup(); 601 omap_hwmod_init_postsetup();
596 omap4xxx_clk_init(); 602 omap_clk_init = omap4xxx_clk_init;
597} 603}
598 604
599void __init omap4430_init_late(void) 605void __init omap4430_init_late(void)
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index c2c798c08c2b..a202a4785104 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1368,7 +1368,9 @@ static void _enable_sysc(struct omap_hwmod *oh)
1368 } 1368 }
1369 1369
1370 if (sf & SYSC_HAS_MIDLEMODE) { 1370 if (sf & SYSC_HAS_MIDLEMODE) {
1371 if (oh->flags & HWMOD_SWSUP_MSTANDBY) { 1371 if (oh->flags & HWMOD_FORCE_MSTANDBY) {
1372 idlemode = HWMOD_IDLEMODE_FORCE;
1373 } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
1372 idlemode = HWMOD_IDLEMODE_NO; 1374 idlemode = HWMOD_IDLEMODE_NO;
1373 } else { 1375 } else {
1374 if (sf & SYSC_HAS_ENAWAKEUP) 1376 if (sf & SYSC_HAS_ENAWAKEUP)
@@ -1440,7 +1442,8 @@ static void _idle_sysc(struct omap_hwmod *oh)
1440 } 1442 }
1441 1443
1442 if (sf & SYSC_HAS_MIDLEMODE) { 1444 if (sf & SYSC_HAS_MIDLEMODE) {
1443 if (oh->flags & HWMOD_SWSUP_MSTANDBY) { 1445 if ((oh->flags & HWMOD_SWSUP_MSTANDBY) ||
1446 (oh->flags & HWMOD_FORCE_MSTANDBY)) {
1444 idlemode = HWMOD_IDLEMODE_FORCE; 1447 idlemode = HWMOD_IDLEMODE_FORCE;
1445 } else { 1448 } else {
1446 if (sf & SYSC_HAS_ENAWAKEUP) 1449 if (sf & SYSC_HAS_ENAWAKEUP)
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index d43d9b608eda..d5dc935f6060 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm {
427 * 427 *
428 * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out 428 * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out
429 * of idle, rather than relying on module smart-idle 429 * of idle, rather than relying on module smart-idle
430 * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out 430 * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and
431 * of standby, rather than relying on module smart-standby 431 * out of standby, rather than relying on module smart-standby
432 * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for 432 * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for
433 * SDRAM controller, etc. XXX probably belongs outside the main hwmod file 433 * SDRAM controller, etc. XXX probably belongs outside the main hwmod file
434 * XXX Should be HWMOD_SETUP_NO_RESET 434 * XXX Should be HWMOD_SETUP_NO_RESET
@@ -459,6 +459,10 @@ struct omap_hwmod_omap4_prcm {
459 * correctly, or this is being abused to deal with some PM latency 459 * correctly, or this is being abused to deal with some PM latency
460 * issues -- but we're currently suffering from a shortage of 460 * issues -- but we're currently suffering from a shortage of
461 * folks who are able to track these issues down properly. 461 * folks who are able to track these issues down properly.
462 * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that the
463 * device is kept in force-standby mode. Failing to do so causes PM problems
464 * with musb on OMAP3630 at least. Note that musb has a dedicated register
465 * to control the MSTANDBY signal when MIDLEMODE is set to force-standby.
462 */ 466 */
463#define HWMOD_SWSUP_SIDLE (1 << 0) 467#define HWMOD_SWSUP_SIDLE (1 << 0)
464#define HWMOD_SWSUP_MSTANDBY (1 << 1) 468#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@@ -471,6 +475,7 @@ struct omap_hwmod_omap4_prcm {
471#define HWMOD_16BIT_REG (1 << 8) 475#define HWMOD_16BIT_REG (1 << 8)
472#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) 476#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9)
473#define HWMOD_BLOCK_WFI (1 << 10) 477#define HWMOD_BLOCK_WFI (1 << 10)
478#define HWMOD_FORCE_MSTANDBY (1 << 11)
474 479
475/* 480/*
476 * omap_hwmod._int_flags definitions 481 * omap_hwmod._int_flags definitions
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index ac7e03ec952f..5112d04e7b79 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1707,9 +1707,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
1707 * Erratum ID: i479 idle_req / idle_ack mechanism potentially 1707 * Erratum ID: i479 idle_req / idle_ack mechanism potentially
1708 * broken when autoidle is enabled. 1708 * broken when autoidle is enabled.
1709 * The workaround is to disable the autoidle bit at module level. 1709 * The workaround is to disable the autoidle bit at module level.
1710 *
1711 * Enabling the device in any MIDLEMODE setting other than force-idle
1712 * causes core_pwrdm to not enter idle states, at least on OMAP3630.
1713 * Note that musb has an OTG_FORCESTDBY register that controls the
1714 * MSTANDBY signal when MIDLEMODE is set to force-idle.
1710 */ 1715 */
1711 .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE 1716 .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
1712 | HWMOD_SWSUP_MSTANDBY, 1717 | HWMOD_FORCE_MSTANDBY,
1713}; 1718};
1714 1719
1715/* usb_otg_hs */ 1720/* usb_otg_hs */
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 0e47d2e1687c..9e0576569e07 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2714,6 +2714,10 @@ static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = {
2714 { } 2714 { }
2715}; 2715};
2716 2716
2717static struct omap_hwmod_opt_clk ocp2scp_usb_phy_opt_clks[] = {
2718 { .role = "48mhz", .clk = "ocp2scp_usb_phy_phy_48m" },
2719};
2720
2717/* ocp2scp_usb_phy */ 2721/* ocp2scp_usb_phy */
2718static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { 2722static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
2719 .name = "ocp2scp_usb_phy", 2723 .name = "ocp2scp_usb_phy",
@@ -2728,6 +2732,8 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
2728 }, 2732 },
2729 }, 2733 },
2730 .dev_attr = ocp2scp_dev_attr, 2734 .dev_attr = ocp2scp_dev_attr,
2735 .opt_clks = ocp2scp_usb_phy_opt_clks,
2736 .opt_clks_cnt = ARRAY_SIZE(ocp2scp_usb_phy_opt_clks),
2731}; 2737};
2732 2738
2733/* 2739/*
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 2bdd4cf17a8f..f62b509ed08d 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -547,6 +547,8 @@ static inline void __init realtime_counter_init(void)
547 clksrc_nr, clksrc_src) \ 547 clksrc_nr, clksrc_src) \
548void __init omap##name##_gptimer_timer_init(void) \ 548void __init omap##name##_gptimer_timer_init(void) \
549{ \ 549{ \
550 if (omap_clk_init) \
551 omap_clk_init(); \
550 omap_dmtimer_init(); \ 552 omap_dmtimer_init(); \
551 omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ 553 omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \
552 omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \ 554 omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \
@@ -556,6 +558,8 @@ void __init omap##name##_gptimer_timer_init(void) \
556 clksrc_nr, clksrc_src) \ 558 clksrc_nr, clksrc_src) \
557void __init omap##name##_sync32k_timer_init(void) \ 559void __init omap##name##_sync32k_timer_init(void) \
558{ \ 560{ \
561 if (omap_clk_init) \
562 omap_clk_init(); \
559 omap_dmtimer_init(); \ 563 omap_dmtimer_init(); \
560 omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ 564 omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \
561 /* Enable the use of clocksource="gp_timer" kernel parameter */ \ 565 /* Enable the use of clocksource="gp_timer" kernel parameter */ \
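The io.c and timer.c hunks above work as a pair: early init now only records which SoC's clock initializer should run, and the timer code calls it just before the clock events and clocksource are brought up. A minimal user-space sketch of the same deferred-initialization pattern follows; every name in it (soc_clk_init, omap4_clk_init_stub, and so on) is illustrative, not taken from the kernel tree.

#include <stdio.h>

static int (*soc_clk_init)(void);          /* recorded during early init */

static int omap4_clk_init_stub(void)       /* stand-in for a SoC clock initializer */
{
        printf("registering SoC clocks\n");
        return 0;
}

static void soc_init_early(void)
{
        /* record the initializer instead of calling it right away */
        soc_clk_init = omap4_clk_init_stub;
}

static void soc_timer_init(void)
{
        if (soc_clk_init)
                soc_clk_init();            /* clocks come up just before the timers */
        /* clockevent/clocksource setup would follow here */
}

int main(void)
{
        soc_init_early();
        soc_timer_init();
        return 0;
}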
diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h
index b7a9f4d469e8..1e73f5fa8659 100644
--- a/arch/arm/mach-s3c24xx/include/mach/irqs.h
+++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h
@@ -188,10 +188,8 @@
188 188
189#if defined(CONFIG_CPU_S3C2416) 189#if defined(CONFIG_CPU_S3C2416)
190#define NR_IRQS (IRQ_S3C2416_I2S1 + 1) 190#define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
191#elif defined(CONFIG_CPU_S3C2443)
192#define NR_IRQS (IRQ_S3C2443_AC97+1)
193#else 191#else
194#define NR_IRQS (IRQ_S3C2440_AC97+1) 192#define NR_IRQS (IRQ_S3C2443_AC97 + 1)
195#endif 193#endif
196 194
197/* compatibility define. */ 195/* compatibility define. */
diff --git a/arch/arm/mach-s3c24xx/irq.c b/arch/arm/mach-s3c24xx/irq.c
index cb9f5e011e73..d8ba9bee4c7e 100644
--- a/arch/arm/mach-s3c24xx/irq.c
+++ b/arch/arm/mach-s3c24xx/irq.c
@@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np,
500 base = (void *)0xfd000000; 500 base = (void *)0xfd000000;
501 501
502 intc->reg_mask = base + 0xa4; 502 intc->reg_mask = base + 0xa4;
503 intc->reg_pending = base + 0x08; 503 intc->reg_pending = base + 0xa8;
504 irq_num = 20; 504 irq_num = 20;
505 irq_start = S3C2410_IRQ(32); 505 irq_start = S3C2410_IRQ(32);
506 irq_offset = 4; 506 irq_offset = 4;
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
index 051b62c27102..7f2cb6c5e2c1 100644
--- a/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -81,7 +81,6 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {
81#endif 81#endif
82 82
83struct mmci_platform_data mop500_sdi0_data = { 83struct mmci_platform_data mop500_sdi0_data = {
84 .ios_handler = mop500_sdi0_ios_handler,
85 .ocr_mask = MMC_VDD_29_30, 84 .ocr_mask = MMC_VDD_29_30,
86 .f_max = 50000000, 85 .f_max = 50000000,
87 .capabilities = MMC_CAP_4_BIT_DATA | 86 .capabilities = MMC_CAP_4_BIT_DATA |
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index b03457881c4b..87d2d7b38ce9 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -12,6 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/clk.h>
15#include <linux/io.h> 16#include <linux/io.h>
16#include <linux/i2c.h> 17#include <linux/i2c.h>
17#include <linux/platform_data/i2c-nomadik.h> 18#include <linux/platform_data/i2c-nomadik.h>
@@ -439,6 +440,15 @@ static void mop500_prox_deactivate(struct device *dev)
439 regulator_put(prox_regulator); 440 regulator_put(prox_regulator);
440} 441}
441 442
443void mop500_snowball_ethernet_clock_enable(void)
444{
445 struct clk *clk;
446
447 clk = clk_get_sys("fsmc", NULL);
448 if (!IS_ERR(clk))
449 clk_prepare_enable(clk);
450}
451
442static struct cryp_platform_data u8500_cryp1_platform_data = { 452static struct cryp_platform_data u8500_cryp1_platform_data = {
443 .mem_to_engine = { 453 .mem_to_engine = {
444 .dir = STEDMA40_MEM_TO_PERIPH, 454 .dir = STEDMA40_MEM_TO_PERIPH,
@@ -683,6 +693,8 @@ static void __init snowball_init_machine(void)
683 mop500_audio_init(parent); 693 mop500_audio_init(parent);
684 mop500_uart_init(parent); 694 mop500_uart_init(parent);
685 695
696 mop500_snowball_ethernet_clock_enable();
697
686 /* This board has full regulator constraints */ 698 /* This board has full regulator constraints */
687 regulator_has_full_constraints(); 699 regulator_has_full_constraints();
688} 700}
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
index eaa605f5d90d..d38951be70df 100644
--- a/arch/arm/mach-ux500/board-mop500.h
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -104,6 +104,7 @@ void __init mop500_pinmaps_init(void);
104void __init snowball_pinmaps_init(void); 104void __init snowball_pinmaps_init(void);
105void __init hrefv60_pinmaps_init(void); 105void __init hrefv60_pinmaps_init(void);
106void mop500_audio_init(struct device *parent); 106void mop500_audio_init(struct device *parent);
107void mop500_snowball_ethernet_clock_enable(void);
107 108
108int __init mop500_uib_init(void); 109int __init mop500_uib_init(void);
109void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info, 110void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 19235cf7bbe3..f1a581844372 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -312,9 +312,10 @@ static void __init u8500_init_machine(void)
312 /* Pinmaps must be in place before devices register */ 312 /* Pinmaps must be in place before devices register */
313 if (of_machine_is_compatible("st-ericsson,mop500")) 313 if (of_machine_is_compatible("st-ericsson,mop500"))
314 mop500_pinmaps_init(); 314 mop500_pinmaps_init();
315 else if (of_machine_is_compatible("calaosystems,snowball-a9500")) 315 else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
316 snowball_pinmaps_init(); 316 snowball_pinmaps_init();
317 else if (of_machine_is_compatible("st-ericsson,hrefv60+")) 317 mop500_snowball_ethernet_clock_enable();
318 } else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
318 hrefv60_pinmaps_init(); 319 hrefv60_pinmaps_init();
319 else if (of_machine_is_compatible("st-ericsson,ccu9540")) {} 320 else if (of_machine_is_compatible("st-ericsson,ccu9540")) {}
320 /* TODO: Add pinmaps for ccu9540 board. */ 321 /* TODO: Add pinmaps for ccu9540 board. */
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c2f37390308a..c465faca51b0 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
299 int lockregs; 299 int lockregs;
300 int i; 300 int i;
301 301
302 switch (cache_id) { 302 switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
303 case L2X0_CACHE_ID_PART_L310: 303 case L2X0_CACHE_ID_PART_L310:
304 lockregs = 8; 304 lockregs = 8;
305 break; 305 break;
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
333 if (cache_id_part_number_from_dt) 333 if (cache_id_part_number_from_dt)
334 cache_id = cache_id_part_number_from_dt; 334 cache_id = cache_id_part_number_from_dt;
335 else 335 else
336 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID) 336 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
337 & L2X0_CACHE_ID_PART_MASK;
338 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 337 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
339 338
340 aux &= aux_mask; 339 aux &= aux_mask;
341 aux |= aux_val; 340 aux |= aux_val;
342 341
343 /* Determine the number of ways */ 342 /* Determine the number of ways */
344 switch (cache_id) { 343 switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
345 case L2X0_CACHE_ID_PART_L310: 344 case L2X0_CACHE_ID_PART_L310:
346 if (aux & (1 << 16)) 345 if (aux & (1 << 16))
347 ways = 16; 346 ways = 16;
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {
725 .flush_all = l2x0_flush_all, 724 .flush_all = l2x0_flush_all,
726 .inv_all = l2x0_inv_all, 725 .inv_all = l2x0_inv_all,
727 .disable = l2x0_disable, 726 .disable = l2x0_disable,
728 .set_debug = pl310_set_debug,
729 }, 727 },
730}; 728};
731 729
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
814 data->save(); 812 data->save();
815 813
816 of_init = true; 814 of_init = true;
817 l2x0_init(l2x0_base, aux_val, aux_mask);
818
819 memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); 815 memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
816 l2x0_init(l2x0_base, aux_val, aux_mask);
820 817
821 return 0; 818 return 0;
822} 819}
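The cache-l2x0.c hunks above stop masking the CACHE_ID register at read time and instead apply L2X0_CACHE_ID_PART_MASK only where the part number is compared, which keeps the RTL-revision bits of the raw value available to later code. A small sketch of splitting the ID that way; the mask values are defined locally to mirror the register layout and should be read as an assumption of the example, not a quote from the patch.

#include <stdio.h>

#define CACHE_ID_PART_MASK   (0xf << 6)   /* part number field */
#define CACHE_ID_PART_L310   (3 << 6)
#define CACHE_ID_RTL_MASK    0x3f         /* RTL release field */

int main(void)
{
        unsigned int cache_id = 0x410000c8;   /* hypothetical raw register value */

        if ((cache_id & CACHE_ID_PART_MASK) == CACHE_ID_PART_L310)
                printf("PL310, RTL release 0x%02x\n",
                       cache_id & CACHE_ID_RTL_MASK);
        return 0;
}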
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index a5a4b2bc42ba..2ac37372ef52 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
48static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); 48static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
49static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); 49static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
50 50
51static DEFINE_PER_CPU(atomic64_t, active_asids); 51DEFINE_PER_CPU(atomic64_t, active_asids);
52static DEFINE_PER_CPU(u64, reserved_asids); 52static DEFINE_PER_CPU(u64, reserved_asids);
53static cpumask_t tlb_flush_pending; 53static cpumask_t tlb_flush_pending;
54 54
@@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
215 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { 215 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
216 local_flush_bp_all(); 216 local_flush_bp_all();
217 local_flush_tlb_all(); 217 local_flush_tlb_all();
218 dummy_flush_tlb_a15_erratum();
218 } 219 }
219 220
220 atomic64_set(&per_cpu(active_asids, cpu), asid); 221 atomic64_set(&per_cpu(active_asids, cpu), asid);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e95a996ab78f..78978945492a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -598,39 +598,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
598 } while (pte++, addr += PAGE_SIZE, addr != end); 598 } while (pte++, addr += PAGE_SIZE, addr != end);
599} 599}
600 600
601static void __init alloc_init_section(pud_t *pud, unsigned long addr, 601static void __init map_init_section(pmd_t *pmd, unsigned long addr,
602 unsigned long end, phys_addr_t phys, 602 unsigned long end, phys_addr_t phys,
603 const struct mem_type *type) 603 const struct mem_type *type)
604{ 604{
605 pmd_t *pmd = pmd_offset(pud, addr); 605#ifndef CONFIG_ARM_LPAE
606
607 /* 606 /*
608 * Try a section mapping - end, addr and phys must all be aligned 607 * In classic MMU format, puds and pmds are folded in to
609 * to a section boundary. Note that PMDs refer to the individual 608 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
610 * L1 entries, whereas PGDs refer to a group of L1 entries making 609 * group of L1 entries making up one logical pointer to
611 * up one logical pointer to an L2 table. 610 * an L2 table (2MB), whereas PMDs refer to the individual
611 * L1 entries (1MB). Hence increment to get the correct
612 * offset for odd 1MB sections.
613 * (See arch/arm/include/asm/pgtable-2level.h)
612 */ 614 */
613 if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) { 615 if (addr & SECTION_SIZE)
614 pmd_t *p = pmd; 616 pmd++;
615
616#ifndef CONFIG_ARM_LPAE
617 if (addr & SECTION_SIZE)
618 pmd++;
619#endif 617#endif
618 do {
619 *pmd = __pmd(phys | type->prot_sect);
620 phys += SECTION_SIZE;
621 } while (pmd++, addr += SECTION_SIZE, addr != end);
620 622
621 do { 623 flush_pmd_entry(pmd);
622 *pmd = __pmd(phys | type->prot_sect); 624}
623 phys += SECTION_SIZE;
624 } while (pmd++, addr += SECTION_SIZE, addr != end);
625 625
626 flush_pmd_entry(p); 626static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
627 } else { 627 unsigned long end, phys_addr_t phys,
628 const struct mem_type *type)
629{
630 pmd_t *pmd = pmd_offset(pud, addr);
631 unsigned long next;
632
633 do {
628 /* 634 /*
629 * No need to loop; pte's aren't interested in the 635 * With LPAE, we must loop over the pmds
630 * individual L1 entries. 636 * to map the whole of the given range.
631 */ 637 */
632 alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); 638 next = pmd_addr_end(addr, end);
633 } 639
640 /*
641 * Try a section mapping - addr, next and phys must all be
642 * aligned to a section boundary.
643 */
644 if (type->prot_sect &&
645 ((addr | next | phys) & ~SECTION_MASK) == 0) {
646 map_init_section(pmd, addr, next, phys, type);
647 } else {
648 alloc_init_pte(pmd, addr, next,
649 __phys_to_pfn(phys), type);
650 }
651
652 phys += next - addr;
653
654 } while (pmd++, addr = next, addr != end);
634} 655}
635 656
636static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, 657static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -641,7 +662,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
641 662
642 do { 663 do {
643 next = pud_addr_end(addr, end); 664 next = pud_addr_end(addr, end);
644 alloc_init_section(pud, addr, next, phys, type); 665 alloc_init_pmd(pud, addr, next, phys, type);
645 phys += next - addr; 666 phys += next - addr;
646 } while (pud++, addr = next, addr != end); 667 } while (pud++, addr = next, addr != end);
647} 668}
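The rewritten alloc_init_pmd() above decides per PMD between a section mapping and individual PTEs using the test ((addr | next | phys) & ~SECTION_MASK) == 0. A short stand-alone illustration of that alignment check, with 1MB sections as in the classic (non-LPAE) format described in the comments:

#include <stdio.h>

#define SECTION_SIZE  0x00100000UL            /* 1MB sections, classic format */
#define SECTION_MASK  (~(SECTION_SIZE - 1))

static int can_use_section(unsigned long addr, unsigned long next,
                           unsigned long phys)
{
        /* non-zero low bits in any of the three rule out a section mapping */
        return ((addr | next | phys) & ~SECTION_MASK) == 0;
}

int main(void)
{
        /* all three 1MB-aligned: a section mapping is possible */
        printf("%d\n", can_use_section(0xc0000000, 0xc0100000, 0x80000000));
        /* phys misaligned by 4KB: fall back to individual PTEs */
        printf("%d\n", can_use_section(0xc0000000, 0xc0100000, 0x80001000));
        return 0;
}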
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3a3c015f8d5c..f584d3f5b37c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info:
420__v7_ca7mp_proc_info: 420__v7_ca7mp_proc_info:
421 .long 0x410fc070 421 .long 0x410fc070
422 .long 0xff0ffff0 422 .long 0xff0ffff0
423 __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV 423 __v7_proc __v7_ca7mp_setup
424 .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info 424 .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
425 425
426 /* 426 /*
@@ -430,10 +430,25 @@ __v7_ca7mp_proc_info:
430__v7_ca15mp_proc_info: 430__v7_ca15mp_proc_info:
431 .long 0x410fc0f0 431 .long 0x410fc0f0
432 .long 0xff0ffff0 432 .long 0xff0ffff0
433 __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV 433 __v7_proc __v7_ca15mp_setup
434 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info 434 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
435 435
436 /* 436 /*
437 * Qualcomm Inc. Krait processors.
438 */
439 .type __krait_proc_info, #object
440__krait_proc_info:
441 .long 0x510f0400 @ Required ID value
442 .long 0xff0ffc00 @ Mask for ID
443 /*
444 * Some Krait processors don't indicate support for SDIV and UDIV
445 * instructions in the ARM instruction set, even though they actually
446 * do support them.
447 */
448 __v7_proc __v7_setup, hwcaps = HWCAP_IDIV
449 .size __krait_proc_info, . - __krait_proc_info
450
451 /*
437 * Match any ARMv7 processor core. 452 * Match any ARMv7 processor core.
438 */ 453 */
439 .type __v7_proc_info, #object 454 .type __v7_proc_info, #object
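The new __krait_proc_info entry forces HWCAP_IDIV so that userspace sees the integer-divide capability even though the CPU's ID registers understate it. A hedged userspace sketch of how that capability is typically consumed on ARM Linux; the HWCAP_IDIVA/HWCAP_IDIVT values below are an assumption mirroring the kernel's ARM hwcap header, not something taken from this patch.

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_IDIVA (1 << 17)   /* assumed: sdiv/udiv usable in ARM state */
#define HWCAP_IDIVT (1 << 18)   /* assumed: sdiv/udiv usable in Thumb state */

int main(void)
{
        unsigned long hwcap = getauxval(AT_HWCAP);

        if ((hwcap & (HWCAP_IDIVA | HWCAP_IDIVT)) == (HWCAP_IDIVA | HWCAP_IDIVT))
                printf("hardware integer divide available\n");
        else
                printf("fall back to library division\n");
        return 0;
}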
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 224b44ab534e..70b8cd4021c4 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
261void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt) 261void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
262{ 262{
263 unsigned long size, mask; 263 unsigned long size, mask;
264 bool page64k = IS_ENABLED(ARM64_64K_PAGES); 264 bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
265 pgd_t *pgd; 265 pgd_t *pgd;
266 pud_t *pud; 266 pud_t *pud;
267 pmd_t *pmd; 267 pmd_t *pmd;
diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h
index cf78e09e18c3..2c71d5634ec2 100644
--- a/arch/c6x/include/asm/irqflags.h
+++ b/arch/c6x/include/asm/irqflags.h
@@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void)
27/* set interrupt enabled status */ 27/* set interrupt enabled status */
28static inline void arch_local_irq_restore(unsigned long flags) 28static inline void arch_local_irq_restore(unsigned long flags)
29{ 29{
30 asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags)); 30 asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory");
31} 31}
32 32
33/* unconditionally enable interrupts */ 33/* unconditionally enable interrupts */
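The c6x change adds a "memory" clobber to arch_local_irq_restore(), turning it into a compiler barrier: loads and stores belonging to the critical section can no longer be moved past the point where interrupts come back on. A compile-only sketch of that barrier effect in generic GCC inline asm (the C6x CSR write itself is not modelled):

/* also touched by an interrupt handler in the scenario being protected */
unsigned long shared_counter;

static inline void local_irq_restore_sketch(unsigned long flags)
{
        (void)flags;
        /* the "memory" clobber forbids moving memory accesses across this point */
        __asm__ __volatile__("" : : : "memory");
}

void update_under_irq_protection(unsigned long flags)
{
        shared_counter++;                /* must complete before irqs are re-enabled */
        local_irq_restore_sketch(flags); /* without the clobber, the store could sink past here */
}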
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 77597e5ea60a..79521d5499f9 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -849,17 +849,6 @@ static palinfo_entry_t palinfo_entries[]={
849 849
850#define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) 850#define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries)
851 851
852/*
853 * this array is used to keep track of the proc entries we create. This is
854 * required in the module mode when we need to remove all entries. The procfs code
855 * does not do recursion of deletion
856 *
857 * Notes:
858 * - +1 accounts for the cpuN directory entry in /proc/pal
859 */
860#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1))
861
862static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];
863static struct proc_dir_entry *palinfo_dir; 852static struct proc_dir_entry *palinfo_dir;
864 853
865/* 854/*
@@ -971,60 +960,32 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi
971static void __cpuinit 960static void __cpuinit
972create_palinfo_proc_entries(unsigned int cpu) 961create_palinfo_proc_entries(unsigned int cpu)
973{ 962{
974# define CPUSTR "cpu%d"
975
976 pal_func_cpu_u_t f; 963 pal_func_cpu_u_t f;
977 struct proc_dir_entry **pdir;
978 struct proc_dir_entry *cpu_dir; 964 struct proc_dir_entry *cpu_dir;
979 int j; 965 int j;
980 char cpustr[sizeof(CPUSTR)]; 966 char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
981 967 sprintf(cpustr, "cpu%d", cpu);
982
983 /*
984 * we keep track of created entries in a depth-first order for
985 * cleanup purposes. Each entry is stored into palinfo_proc_entries
986 */
987 sprintf(cpustr,CPUSTR, cpu);
988 968
989 cpu_dir = proc_mkdir(cpustr, palinfo_dir); 969 cpu_dir = proc_mkdir(cpustr, palinfo_dir);
970 if (!cpu_dir)
971 return;
990 972
991 f.req_cpu = cpu; 973 f.req_cpu = cpu;
992 974
993 /*
994 * Compute the location to store per cpu entries
995 * We dont store the top level entry in this list, but
996 * remove it finally after removing all cpu entries.
997 */
998 pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)];
999 *pdir++ = cpu_dir;
1000 for (j=0; j < NR_PALINFO_ENTRIES; j++) { 975 for (j=0; j < NR_PALINFO_ENTRIES; j++) {
1001 f.func_id = j; 976 f.func_id = j;
1002 *pdir = create_proc_read_entry( 977 create_proc_read_entry(
1003 palinfo_entries[j].name, 0, cpu_dir, 978 palinfo_entries[j].name, 0, cpu_dir,
1004 palinfo_read_entry, (void *)f.value); 979 palinfo_read_entry, (void *)f.value);
1005 pdir++;
1006 } 980 }
1007} 981}
1008 982
1009static void 983static void
1010remove_palinfo_proc_entries(unsigned int hcpu) 984remove_palinfo_proc_entries(unsigned int hcpu)
1011{ 985{
1012 int j; 986 char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
1013 struct proc_dir_entry *cpu_dir, **pdir; 987 sprintf(cpustr, "cpu%d", hcpu);
1014 988 remove_proc_subtree(cpustr, palinfo_dir);
1015 pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)];
1016 cpu_dir = *pdir;
1017 *pdir++=NULL;
1018 for (j=0; j < (NR_PALINFO_ENTRIES); j++) {
1019 if ((*pdir)) {
1020 remove_proc_entry ((*pdir)->name, cpu_dir);
1021 *pdir ++= NULL;
1022 }
1023 }
1024
1025 if (cpu_dir) {
1026 remove_proc_entry(cpu_dir->name, palinfo_dir);
1027 }
1028} 989}
1029 990
1030static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, 991static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
@@ -1058,6 +1019,8 @@ palinfo_init(void)
1058 1019
1059 printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); 1020 printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
1060 palinfo_dir = proc_mkdir("pal", NULL); 1021 palinfo_dir = proc_mkdir("pal", NULL);
1022 if (!palinfo_dir)
1023 return -ENOMEM;
1061 1024
1062 /* Create palinfo dirs in /proc for all online cpus */ 1025 /* Create palinfo dirs in /proc for all online cpus */
1063 for_each_online_cpu(i) { 1026 for_each_online_cpu(i) {
@@ -1073,22 +1036,8 @@ palinfo_init(void)
1073static void __exit 1036static void __exit
1074palinfo_exit(void) 1037palinfo_exit(void)
1075{ 1038{
1076 int i = 0;
1077
1078 /* remove all nodes: depth first pass. Could optimize this */
1079 for_each_online_cpu(i) {
1080 remove_palinfo_proc_entries(i);
1081 }
1082
1083 /*
1084 * Remove the top level entry finally
1085 */
1086 remove_proc_entry(palinfo_dir->name, NULL);
1087
1088 /*
1089 * Unregister from cpu notifier callbacks
1090 */
1091 unregister_hotcpu_notifier(&palinfo_cpu_notifier); 1039 unregister_hotcpu_notifier(&palinfo_cpu_notifier);
1040 remove_proc_subtree("pal", NULL);
1092} 1041}
1093 1042
1094module_init(palinfo_init); 1043module_init(palinfo_init);
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index e34f565f595a..6f7dc8b7b35c 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -291,7 +291,6 @@ cpu_idle (void)
291 } 291 }
292 292
293 if (!need_resched()) { 293 if (!need_resched()) {
294 void (*idle)(void);
295#ifdef CONFIG_SMP 294#ifdef CONFIG_SMP
296 min_xtp(); 295 min_xtp();
297#endif 296#endif
@@ -299,9 +298,7 @@ cpu_idle (void)
299 if (mark_idle) 298 if (mark_idle)
300 (*mark_idle)(1); 299 (*mark_idle)(1);
301 300
302 if (!idle) 301 default_idle();
303 idle = default_idle;
304 (*idle)();
305 if (mark_idle) 302 if (mark_idle)
306 (*mark_idle)(0); 303 (*mark_idle)(0);
307#ifdef CONFIG_SMP 304#ifdef CONFIG_SMP
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
index 4395ffc51fdb..8cc83431805b 100644
--- a/arch/m68k/include/asm/gpio.h
+++ b/arch/m68k/include/asm/gpio.h
@@ -86,4 +86,24 @@ static inline int gpio_cansleep(unsigned gpio)
86 return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio); 86 return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
87} 87}
88 88
89static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
90{
91 int err;
92
93 err = gpio_request(gpio, label);
94 if (err)
95 return err;
96
97 if (flags & GPIOF_DIR_IN)
98 err = gpio_direction_input(gpio);
99 else
100 err = gpio_direction_output(gpio,
101 (flags & GPIOF_INIT_HIGH) ? 1 : 0);
102
103 if (err)
104 gpio_free(gpio);
105
106 return err;
107}
108
89#endif 109#endif
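The m68k header above grows a gpio_request_one() fallback that combines the request with the direction setup. A hedged usage sketch of the helper as a driver probe path might call it; the GPIO number and label are placeholders, not values from the patch.

#include <linux/gpio.h>
#include <linux/errno.h>

#define LED_GPIO 42     /* hypothetical pin */

static int example_led_setup(void)
{
        int err;

        /* request the pin and configure it as an output driven low, in one call */
        err = gpio_request_one(LED_GPIO, GPIOF_OUT_INIT_LOW, "status-led");
        if (err)
                return err;

        gpio_set_value(LED_GPIO, 1);    /* turn the LED on */
        return 0;
}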
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index cd2e21ff562a..51244bf97271 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -18,7 +18,7 @@ config MIPS
18 select HAVE_KRETPROBES 18 select HAVE_KRETPROBES
19 select HAVE_DEBUG_KMEMLEAK 19 select HAVE_DEBUG_KMEMLEAK
20 select ARCH_BINFMT_ELF_RANDOMIZE_PIE 20 select ARCH_BINFMT_ELF_RANDOMIZE_PIE
21 select HAVE_ARCH_TRANSPARENT_HUGEPAGE 21 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
22 select RTC_LIB if !MACH_LOONGSON 22 select RTC_LIB if !MACH_LOONGSON
23 select GENERIC_ATOMIC64 if !64BIT 23 select GENERIC_ATOMIC64 if !64BIT
24 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 24 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -657,7 +657,7 @@ config SNI_RM
657 bool "SNI RM200/300/400" 657 bool "SNI RM200/300/400"
658 select FW_ARC if CPU_LITTLE_ENDIAN 658 select FW_ARC if CPU_LITTLE_ENDIAN
659 select FW_ARC32 if CPU_LITTLE_ENDIAN 659 select FW_ARC32 if CPU_LITTLE_ENDIAN
660 select SNIPROM if CPU_BIG_ENDIAN 660 select FW_SNIPROM if CPU_BIG_ENDIAN
661 select ARCH_MAY_HAVE_PC_FDC 661 select ARCH_MAY_HAVE_PC_FDC
662 select BOOT_ELF32 662 select BOOT_ELF32
663 select CEVT_R4K 663 select CEVT_R4K
@@ -1144,7 +1144,7 @@ config DEFAULT_SGI_PARTITION
1144config FW_ARC32 1144config FW_ARC32
1145 bool 1145 bool
1146 1146
1147config SNIPROM 1147config FW_SNIPROM
1148 bool 1148 bool
1149 1149
1150config BOOT_ELF32 1150config BOOT_ELF32
@@ -1493,7 +1493,6 @@ config CPU_XLP
1493 select CPU_SUPPORTS_32BIT_KERNEL 1493 select CPU_SUPPORTS_32BIT_KERNEL
1494 select CPU_SUPPORTS_64BIT_KERNEL 1494 select CPU_SUPPORTS_64BIT_KERNEL
1495 select CPU_SUPPORTS_HIGHMEM 1495 select CPU_SUPPORTS_HIGHMEM
1496 select CPU_HAS_LLSC
1497 select WEAK_ORDERING 1496 select WEAK_ORDERING
1498 select WEAK_REORDERING_BEYOND_LLSC 1497 select WEAK_REORDERING_BEYOND_LLSC
1499 select CPU_HAS_PREFETCH 1498 select CPU_HAS_PREFETCH
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index ed1949c29508..9aa7d44898ed 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -745,10 +745,7 @@ void __init board_prom_init(void)
745 strcpy(cfe_version, "unknown"); 745 strcpy(cfe_version, "unknown");
746 printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); 746 printk(KERN_INFO PFX "CFE version: %s\n", cfe_version);
747 747
748 if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) { 748 bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET);
749 printk(KERN_ERR PFX "invalid nvram checksum\n");
750 return;
751 }
752 749
753 board_name = bcm63xx_nvram_get_name(); 750 board_name = bcm63xx_nvram_get_name();
754 /* find board by name */ 751 /* find board by name */
diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c
index 620611680839..a4b8864f9307 100644
--- a/arch/mips/bcm63xx/nvram.c
+++ b/arch/mips/bcm63xx/nvram.c
@@ -38,7 +38,7 @@ struct bcm963xx_nvram {
38static struct bcm963xx_nvram nvram; 38static struct bcm963xx_nvram nvram;
39static int mac_addr_used; 39static int mac_addr_used;
40 40
41int __init bcm63xx_nvram_init(void *addr) 41void __init bcm63xx_nvram_init(void *addr)
42{ 42{
43 unsigned int check_len; 43 unsigned int check_len;
44 u32 crc, expected_crc; 44 u32 crc, expected_crc;
@@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr)
60 crc = crc32_le(~0, (u8 *)&nvram, check_len); 60 crc = crc32_le(~0, (u8 *)&nvram, check_len);
61 61
62 if (crc != expected_crc) 62 if (crc != expected_crc)
63 return -EINVAL; 63 pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
64 64 expected_crc, crc);
65 return 0;
66} 65}
67 66
68u8 *bcm63xx_nvram_get_name(void) 67u8 *bcm63xx_nvram_get_name(void)
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
index 314231be788c..35e18e98beb9 100644
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void)
157 return board_register_devices(); 157 return board_register_devices();
158} 158}
159 159
160device_initcall(bcm63xx_register_devices); 160arch_initcall(bcm63xx_register_devices);
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index c594a3d4f743..b0baa299f899 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -174,7 +174,10 @@ static int octeon_kexec_prepare(struct kimage *image)
174 174
175static void octeon_generic_shutdown(void) 175static void octeon_generic_shutdown(void)
176{ 176{
177 int cpu, i; 177 int i;
178#ifdef CONFIG_SMP
179 int cpu;
180#endif
178 struct cvmx_bootmem_desc *bootmem_desc; 181 struct cvmx_bootmem_desc *bootmem_desc;
179 void *named_block_array_ptr; 182 void *named_block_array_ptr;
180 183
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
index 62d6a3b4d3b7..4e0b6bc1165e 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
@@ -9,10 +9,8 @@
9 * 9 *
10 * Initializes the local nvram copy from the target address and checks 10 * Initializes the local nvram copy from the target address and checks
11 * its checksum. 11 * its checksum.
12 *
13 * Returns 0 on success.
14 */ 12 */
15int __init bcm63xx_nvram_init(void *nvram); 13void bcm63xx_nvram_init(void *nvram);
16 14
17/** 15/**
18 * bcm63xx_nvram_get_name() - returns the board name according to nvram 16 * bcm63xx_nvram_get_name() - returns the board name according to nvram
diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
index d9c828419037..193c0912d38e 100644
--- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
@@ -28,11 +28,7 @@
28/* #define cpu_has_prefetch ? */ 28/* #define cpu_has_prefetch ? */
29#define cpu_has_mcheck 1 29#define cpu_has_mcheck 1
30/* #define cpu_has_ejtag ? */ 30/* #define cpu_has_ejtag ? */
31#ifdef CONFIG_CPU_HAS_LLSC
32#define cpu_has_llsc 1 31#define cpu_has_llsc 1
33#else
34#define cpu_has_llsc 0
35#endif
36/* #define cpu_has_vtag_icache ? */ 32/* #define cpu_has_vtag_icache ? */
37/* #define cpu_has_dc_aliases ? */ 33/* #define cpu_has_dc_aliases ? */
38/* #define cpu_has_ic_fills_f_dc ? */ 34/* #define cpu_has_ic_fills_f_dc ? */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 12b70c25906a..0da44d422f5b 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1166,7 +1166,10 @@ do { \
1166 unsigned int __dspctl; \ 1166 unsigned int __dspctl; \
1167 \ 1167 \
1168 __asm__ __volatile__( \ 1168 __asm__ __volatile__( \
1169 " .set push \n" \
1170 " .set dsp \n" \
1169 " rddsp %0, %x1 \n" \ 1171 " rddsp %0, %x1 \n" \
1172 " .set pop \n" \
1170 : "=r" (__dspctl) \ 1173 : "=r" (__dspctl) \
1171 : "i" (mask)); \ 1174 : "i" (mask)); \
1172 __dspctl; \ 1175 __dspctl; \
@@ -1175,30 +1178,198 @@ do { \
1175#define wrdsp(val, mask) \ 1178#define wrdsp(val, mask) \
1176do { \ 1179do { \
1177 __asm__ __volatile__( \ 1180 __asm__ __volatile__( \
1181 " .set push \n" \
1182 " .set dsp \n" \
1178 " wrdsp %0, %x1 \n" \ 1183 " wrdsp %0, %x1 \n" \
1184 " .set pop \n" \
1179 : \ 1185 : \
1180 : "r" (val), "i" (mask)); \ 1186 : "r" (val), "i" (mask)); \
1181} while (0) 1187} while (0)
1182 1188
1183#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;}) 1189#define mflo0() \
1184#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;}) 1190({ \
1185#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;}) 1191 long mflo0; \
1186#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;}) 1192 __asm__( \
1187 1193 " .set push \n" \
1188#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;}) 1194 " .set dsp \n" \
1189#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;}) 1195 " mflo %0, $ac0 \n" \
1190#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;}) 1196 " .set pop \n" \
1191#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;}) 1197 : "=r" (mflo0)); \
1192 1198 mflo0; \
1193#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x)) 1199})
1194#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x)) 1200
1195#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x)) 1201#define mflo1() \
1196#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x)) 1202({ \
1197 1203 long mflo1; \
1198#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x)) 1204 __asm__( \
1199#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x)) 1205 " .set push \n" \
1200#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x)) 1206 " .set dsp \n" \
1201#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x)) 1207 " mflo %0, $ac1 \n" \
1208 " .set pop \n" \
1209 : "=r" (mflo1)); \
1210 mflo1; \
1211})
1212
1213#define mflo2() \
1214({ \
1215 long mflo2; \
1216 __asm__( \
1217 " .set push \n" \
1218 " .set dsp \n" \
1219 " mflo %0, $ac2 \n" \
1220 " .set pop \n" \
1221 : "=r" (mflo2)); \
1222 mflo2; \
1223})
1224
1225#define mflo3() \
1226({ \
1227 long mflo3; \
1228 __asm__( \
1229 " .set push \n" \
1230 " .set dsp \n" \
1231 " mflo %0, $ac3 \n" \
1232 " .set pop \n" \
1233 : "=r" (mflo3)); \
1234 mflo3; \
1235})
1236
1237#define mfhi0() \
1238({ \
1239 long mfhi0; \
1240 __asm__( \
1241 " .set push \n" \
1242 " .set dsp \n" \
1243 " mfhi %0, $ac0 \n" \
1244 " .set pop \n" \
1245 : "=r" (mfhi0)); \
1246 mfhi0; \
1247})
1248
1249#define mfhi1() \
1250({ \
1251 long mfhi1; \
1252 __asm__( \
1253 " .set push \n" \
1254 " .set dsp \n" \
1255 " mfhi %0, $ac1 \n" \
1256 " .set pop \n" \
1257 : "=r" (mfhi1)); \
1258 mfhi1; \
1259})
1260
1261#define mfhi2() \
1262({ \
1263 long mfhi2; \
1264 __asm__( \
1265 " .set push \n" \
1266 " .set dsp \n" \
1267 " mfhi %0, $ac2 \n" \
1268 " .set pop \n" \
1269 : "=r" (mfhi2)); \
1270 mfhi2; \
1271})
1272
1273#define mfhi3() \
1274({ \
1275 long mfhi3; \
1276 __asm__( \
1277 " .set push \n" \
1278 " .set dsp \n" \
1279 " mfhi %0, $ac3 \n" \
1280 " .set pop \n" \
1281 : "=r" (mfhi3)); \
1282 mfhi3; \
1283})
1284
1285
1286#define mtlo0(x) \
1287({ \
1288 __asm__( \
1289 " .set push \n" \
1290 " .set dsp \n" \
1291 " mtlo %0, $ac0 \n" \
1292 " .set pop \n" \
1293 : \
1294 : "r" (x)); \
1295})
1296
1297#define mtlo1(x) \
1298({ \
1299 __asm__( \
1300 " .set push \n" \
1301 " .set dsp \n" \
1302 " mtlo %0, $ac1 \n" \
1303 " .set pop \n" \
1304 : \
1305 : "r" (x)); \
1306})
1307
1308#define mtlo2(x) \
1309({ \
1310 __asm__( \
1311 " .set push \n" \
1312 " .set dsp \n" \
1313 " mtlo %0, $ac2 \n" \
1314 " .set pop \n" \
1315 : \
1316 : "r" (x)); \
1317})
1318
1319#define mtlo3(x) \
1320({ \
1321 __asm__( \
1322 " .set push \n" \
1323 " .set dsp \n" \
1324 " mtlo %0, $ac3 \n" \
1325 " .set pop \n" \
1326 : \
1327 : "r" (x)); \
1328})
1329
1330#define mthi0(x) \
1331({ \
1332 __asm__( \
1333 " .set push \n" \
1334 " .set dsp \n" \
1335 " mthi %0, $ac0 \n" \
1336 " .set pop \n" \
1337 : \
1338 : "r" (x)); \
1339})
1340
1341#define mthi1(x) \
1342({ \
1343 __asm__( \
1344 " .set push \n" \
1345 " .set dsp \n" \
1346 " mthi %0, $ac1 \n" \
1347 " .set pop \n" \
1348 : \
1349 : "r" (x)); \
1350})
1351
1352#define mthi2(x) \
1353({ \
1354 __asm__( \
1355 " .set push \n" \
1356 " .set dsp \n" \
1357 " mthi %0, $ac2 \n" \
1358 " .set pop \n" \
1359 : \
1360 : "r" (x)); \
1361})
1362
1363#define mthi3(x) \
1364({ \
1365 __asm__( \
1366 " .set push \n" \
1367 " .set dsp \n" \
1368 " mthi %0, $ac3 \n" \
1369 " .set pop \n" \
1370 : \
1371 : "r" (x)); \
1372})
1202 1373
1203#else 1374#else
1204 1375
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h
index 197f6367c201..8efe5a9e2c3e 100644
--- a/arch/mips/include/asm/signal.h
+++ b/arch/mips/include/asm/signal.h
@@ -21,6 +21,6 @@
21#include <asm/sigcontext.h> 21#include <asm/sigcontext.h>
22#include <asm/siginfo.h> 22#include <asm/siginfo.h>
23 23
24#define __ARCH_HAS_ODD_SIGACTION 24#define __ARCH_HAS_IRIX_SIGACTION
25 25
26#endif /* _ASM_SIGNAL_H */ 26#endif /* _ASM_SIGNAL_H */
diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h
index d6b18b4d0f3a..addb9f556b71 100644
--- a/arch/mips/include/uapi/asm/signal.h
+++ b/arch/mips/include/uapi/asm/signal.h
@@ -72,6 +72,12 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
72 * 72 *
73 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single 73 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
74 * Unix names RESETHAND and NODEFER respectively. 74 * Unix names RESETHAND and NODEFER respectively.
75 *
76 * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever
77 * supported its use and no libc was using it, so the entire sa-restorer
78 * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48,
79 * retaining only the SA_RESTORER definition as a reminder to avoid
80 * accidental reuse of the mask bit.
75 */ 81 */
76#define SA_ONSTACK 0x08000000 82#define SA_ONSTACK 0x08000000
77#define SA_RESETHAND 0x80000000 83#define SA_RESETHAND 0x80000000
@@ -84,8 +90,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
84#define SA_NOMASK SA_NODEFER 90#define SA_NOMASK SA_NODEFER
85#define SA_ONESHOT SA_RESETHAND 91#define SA_ONESHOT SA_RESETHAND
86 92
87#define SA_RESTORER 0x04000000 /* Only for o32 */
88
89#define MINSIGSTKSZ 2048 93#define MINSIGSTKSZ 2048
90#define SIGSTKSZ 8192 94#define SIGSTKSZ 8192
91 95
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index f81d98f6184c..de75fb50562b 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -100,29 +100,16 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
100obj-$(CONFIG_JUMP_LABEL) += jump_label.o 100obj-$(CONFIG_JUMP_LABEL) += jump_label.o
101 101
102# 102#
103# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe 103# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
104# to enable DSP assembler support here even if the MIPS Release 2 CPU we 104# safe to unconditionally use the assembler -mdsp / -mdspr2 switches
105# are targetting does not support DSP because all code-paths making use of 105# here because the compiler may use DSP ASE instructions (such as lwx) in
106# it properly check that the running CPU *actually does* support these 106# code paths where we cannot check that the CPU we are running on supports it.
107# instructions. 107# Proper abstraction using HAVE_AS_DSP and macros is done in
108# arch/mips/include/asm/mipsregs.h.
108# 109#
109ifeq ($(CONFIG_CPU_MIPSR2), y) 110ifeq ($(CONFIG_CPU_MIPSR2), y)
110CFLAGS_DSP = -DHAVE_AS_DSP 111CFLAGS_DSP = -DHAVE_AS_DSP
111 112
112#
113# Check if assembler supports DSP ASE
114#
115ifeq ($(call cc-option-yn,-mdsp), y)
116CFLAGS_DSP += -mdsp
117endif
118
119#
120# Check if assembler supports DSP ASE Rev2
121#
122ifeq ($(call cc-option-yn,-mdspr2), y)
123CFLAGS_DSP += -mdspr2
124endif
125
126CFLAGS_signal.o = $(CFLAGS_DSP) 113CFLAGS_signal.o = $(CFLAGS_DSP)
127CFLAGS_signal32.o = $(CFLAGS_DSP) 114CFLAGS_signal32.o = $(CFLAGS_DSP)
128CFLAGS_process.o = $(CFLAGS_DSP) 115CFLAGS_process.o = $(CFLAGS_DSP)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 6bfccc227a95..5fe66a0c3224 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
580 c->tlbsize = 48; 580 c->tlbsize = 48;
581 break; 581 break;
582 case PRID_IMP_VR41XX: 582 case PRID_IMP_VR41XX:
583 set_isa(c, MIPS_CPU_ISA_III);
584 c->options = R4K_OPTS;
585 c->tlbsize = 32;
583 switch (c->processor_id & 0xf0) { 586 switch (c->processor_id & 0xf0) {
584 case PRID_REV_VR4111: 587 case PRID_REV_VR4111:
585 c->cputype = CPU_VR4111; 588 c->cputype = CPU_VR4111;
@@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
604 __cpu_name[cpu] = "NEC VR4131"; 607 __cpu_name[cpu] = "NEC VR4131";
605 } else { 608 } else {
606 c->cputype = CPU_VR4133; 609 c->cputype = CPU_VR4133;
610 c->options |= MIPS_CPU_LLSC;
607 __cpu_name[cpu] = "NEC VR4133"; 611 __cpu_name[cpu] = "NEC VR4133";
608 } 612 }
609 break; 613 break;
@@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
613 __cpu_name[cpu] = "NEC Vr41xx"; 617 __cpu_name[cpu] = "NEC Vr41xx";
614 break; 618 break;
615 } 619 }
616 set_isa(c, MIPS_CPU_ISA_III);
617 c->options = R4K_OPTS;
618 c->tlbsize = 32;
619 break; 620 break;
620 case PRID_IMP_R4300: 621 case PRID_IMP_R4300:
621 c->cputype = CPU_R4300; 622 c->cputype = CPU_R4300;
@@ -1226,10 +1227,8 @@ __cpuinit void cpu_probe(void)
1226 if (c->options & MIPS_CPU_FPU) { 1227 if (c->options & MIPS_CPU_FPU) {
1227 c->fpu_id = cpu_get_fpu_id(); 1228 c->fpu_id = cpu_get_fpu_id();
1228 1229
1229 if (c->isa_level == MIPS_CPU_ISA_M32R1 || 1230 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
1230 c->isa_level == MIPS_CPU_ISA_M32R2 || 1231 MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
1231 c->isa_level == MIPS_CPU_ISA_M64R1 ||
1232 c->isa_level == MIPS_CPU_ISA_M64R2) {
1233 if (c->fpu_id & MIPS_FPIR_3D) 1232 if (c->fpu_id & MIPS_FPIR_3D)
1234 c->ases |= MIPS_ASE_MIPS3D; 1233 c->ases |= MIPS_ASE_MIPS3D;
1235 } 1234 }
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 8eeee1c860c0..db9655f08892 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
171 err = compat_sys_shmctl(first, second, compat_ptr(ptr)); 171 err = compat_sys_shmctl(first, second, compat_ptr(ptr));
172 break; 172 break;
173 default: 173 default:
174 err = -EINVAL; 174 err = -ENOSYS;
175 break; 175 break;
176 } 176 }
177 177
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 165867673357..33d067148e61 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -46,10 +46,9 @@
46 PTR_L a5, PT_R9(sp) 46 PTR_L a5, PT_R9(sp)
47 PTR_L a6, PT_R10(sp) 47 PTR_L a6, PT_R10(sp)
48 PTR_L a7, PT_R11(sp) 48 PTR_L a7, PT_R11(sp)
49#else
50 PTR_ADDIU sp, PT_SIZE
51#endif 49#endif
52.endm 50 PTR_ADDIU sp, PT_SIZE
51 .endm
53 52
54 .macro RETURN_BACK 53 .macro RETURN_BACK
55 jr ra 54 jr ra
@@ -68,7 +67,11 @@ NESTED(ftrace_caller, PT_SIZE, ra)
68 .globl _mcount 67 .globl _mcount
69_mcount: 68_mcount:
70 b ftrace_stub 69 b ftrace_stub
71 addiu sp,sp,8 70#ifdef CONFIG_32BIT
71 addiu sp,sp,8
72#else
73 nop
74#endif
72 75
73 /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ 76 /* When tracing is activated, it calls ftrace_caller+8 (aka here) */
74 lw t1, function_trace_stop 77 lw t1, function_trace_stop
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 135c4aadccbe..7a54f74b7818 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
67 if (cpu_has_mips_r) { 67 if (cpu_has_mips_r) {
68 seq_printf(m, "isa\t\t\t:"); 68 seq_printf(m, "isa\t\t\t:");
69 if (cpu_has_mips_1) 69 if (cpu_has_mips_1)
70 seq_printf(m, "%s", "mips1"); 70 seq_printf(m, "%s", " mips1");
71 if (cpu_has_mips_2) 71 if (cpu_has_mips_2)
72 seq_printf(m, "%s", " mips2"); 72 seq_printf(m, "%s", " mips2");
73 if (cpu_has_mips_3) 73 if (cpu_has_mips_3)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index a200b5bdbb87..c3abb88170fc 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1571,7 +1571,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1571#ifdef CONFIG_64BIT 1571#ifdef CONFIG_64BIT
1572 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; 1572 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
1573#endif 1573#endif
1574 if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) 1574 if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
1575 status_set |= ST0_XX; 1575 status_set |= ST0_XX;
1576 if (cpu_has_dsp) 1576 if (cpu_has_dsp)
1577 status_set |= ST0_MX; 1577 status_set |= ST0_MX;
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 81f1dcfdcab8..a64daee740ee 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr,
90 unsigned bit = nr & SZLONG_MASK; 90 unsigned bit = nr & SZLONG_MASK;
91 unsigned long mask; 91 unsigned long mask;
92 unsigned long flags; 92 unsigned long flags;
93 unsigned long res; 93 int res;
94 94
95 a += nr >> SZLONG_LOG; 95 a += nr >> SZLONG_LOG;
96 mask = 1UL << bit; 96 mask = 1UL << bit;
97 raw_local_irq_save(flags); 97 raw_local_irq_save(flags);
98 res = (mask & *a); 98 res = (mask & *a) != 0;
99 *a |= mask; 99 *a |= mask;
100 raw_local_irq_restore(flags); 100 raw_local_irq_restore(flags);
101 return res; 101 return res;
@@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr,
116 unsigned bit = nr & SZLONG_MASK; 116 unsigned bit = nr & SZLONG_MASK;
117 unsigned long mask; 117 unsigned long mask;
118 unsigned long flags; 118 unsigned long flags;
119 unsigned long res; 119 int res;
120 120
121 a += nr >> SZLONG_LOG; 121 a += nr >> SZLONG_LOG;
122 mask = 1UL << bit; 122 mask = 1UL << bit;
123 raw_local_irq_save(flags); 123 raw_local_irq_save(flags);
124 res = (mask & *a); 124 res = (mask & *a) != 0;
125 *a |= mask; 125 *a |= mask;
126 raw_local_irq_restore(flags); 126 raw_local_irq_restore(flags);
127 return res; 127 return res;
@@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
141 unsigned bit = nr & SZLONG_MASK; 141 unsigned bit = nr & SZLONG_MASK;
142 unsigned long mask; 142 unsigned long mask;
143 unsigned long flags; 143 unsigned long flags;
144 unsigned long res; 144 int res;
145 145
146 a += nr >> SZLONG_LOG; 146 a += nr >> SZLONG_LOG;
147 mask = 1UL << bit; 147 mask = 1UL << bit;
148 raw_local_irq_save(flags); 148 raw_local_irq_save(flags);
149 res = (mask & *a); 149 res = (mask & *a) != 0;
150 *a &= ~mask; 150 *a &= ~mask;
151 raw_local_irq_restore(flags); 151 raw_local_irq_restore(flags);
152 return res; 152 return res;
@@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
166 unsigned bit = nr & SZLONG_MASK; 166 unsigned bit = nr & SZLONG_MASK;
167 unsigned long mask; 167 unsigned long mask;
168 unsigned long flags; 168 unsigned long flags;
169 unsigned long res; 169 int res;
170 170
171 a += nr >> SZLONG_LOG; 171 a += nr >> SZLONG_LOG;
172 mask = 1UL << bit; 172 mask = 1UL << bit;
173 raw_local_irq_save(flags); 173 raw_local_irq_save(flags);
174 res = (mask & *a); 174 res = (mask & *a) != 0;
175 *a ^= mask; 175 *a ^= mask;
176 raw_local_irq_restore(flags); 176 raw_local_irq_restore(flags);
177 return res; 177 return res;
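The bitops.c hunks change res from unsigned long to int and normalise the test with "!= 0". A small worked example of the truncation this avoids on a 64-bit kernel, where a mask bit at or above bit 32 would otherwise be lost when the value is converted to the functions' int return type (the exact conversion of an out-of-range value is implementation-defined; dropping the high bits is what the usual ABIs do):

#include <stdio.h>

int main(void)
{
        /* models a 64-bit "unsigned long" word with bit 40 set */
        unsigned long long word = 1ULL << 40;
        unsigned long long mask = 1ULL << 40;

        int truncated  = (int)(mask & word);   /* old behaviour: high bits dropped, reports 0 */
        int normalised = (mask & word) != 0;   /* new behaviour: reports 1 */

        printf("truncated=%d normalised=%d\n", truncated, normalised);
        return 0;
}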
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 507147aebd41..a6adffbb4e5f 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -270,7 +270,7 @@ LEAF(csum_partial)
270#endif 270#endif
271 271
272 /* odd buffer alignment? */ 272 /* odd buffer alignment? */
273#ifdef CPU_MIPSR2 273#ifdef CONFIG_CPU_MIPSR2
274 wsbh v1, sum 274 wsbh v1, sum
275 movn sum, v1, t7 275 movn sum, v1, t7
276#else 276#else
@@ -670,7 +670,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc)
670 addu sum, v1 670 addu sum, v1
671#endif 671#endif
672 672
673#ifdef CPU_MIPSR2 673#ifdef CONFIG_CPU_MIPSR2
674 wsbh v1, sum 674 wsbh v1, sum
675 movn sum, v1, odd 675 movn sum, v1, odd
676#else 676#else
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index ecca559b8d7b..2078915eacb9 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1247,10 +1247,8 @@ static void __cpuinit setup_scache(void)
1247 return; 1247 return;
1248 1248
1249 default: 1249 default:
1250 if (c->isa_level == MIPS_CPU_ISA_M32R1 || 1250 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
1251 c->isa_level == MIPS_CPU_ISA_M32R2 || 1251 MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
1252 c->isa_level == MIPS_CPU_ISA_M64R1 ||
1253 c->isa_level == MIPS_CPU_ISA_M64R2) {
1254#ifdef CONFIG_MIPS_CPU_SCACHE 1252#ifdef CONFIG_MIPS_CPU_SCACHE
1255 if (mips_sc_init ()) { 1253 if (mips_sc_init ()) {
1256 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; 1254 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 93d937b4b1ba..df96da7e939b 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void)
98 c->scache.flags |= MIPS_CACHE_NOT_PRESENT; 98 c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
99 99
100 /* Ignore anything but MIPSxx processors */ 100 /* Ignore anything but MIPSxx processors */
101 if (c->isa_level != MIPS_CPU_ISA_M32R1 && 101 if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
102 c->isa_level != MIPS_CPU_ISA_M32R2 && 102 MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
103 c->isa_level != MIPS_CPU_ISA_M64R1 &&
104 c->isa_level != MIPS_CPU_ISA_M64R2)
105 return 0; 103 return 0;
106 104
107 /* Does this MIPS32/MIPS64 CPU have a config2 register? */ 105 /* Does this MIPS32/MIPS64 CPU have a config2 register? */
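The cpu-probe.c, traps.c, c-r4k.c and sc-mips.c hunks all replace chains of isa_level equality checks with a single bitmask test, which works because each ISA level is represented by its own bit. A tiny sketch of that membership test; the bit values are illustrative, not the kernel's.

#include <stdio.h>

#define ISA_M32R1  (1 << 0)
#define ISA_M32R2  (1 << 1)
#define ISA_M64R1  (1 << 2)
#define ISA_M64R2  (1 << 3)

static int has_mips32_or_64(unsigned int isa_level)
{
        /* one AND against an OR-ed mask replaces four equality comparisons */
        return (isa_level & (ISA_M32R1 | ISA_M32R2 |
                             ISA_M64R1 | ISA_M64R2)) != 0;
}

int main(void)
{
        printf("%d\n", has_mips32_or_64(ISA_M64R2));   /* 1: in the set */
        printf("%d\n", has_mips32_or_64(1 << 7));      /* 0: some other ISA */
        return 0;
}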
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
index 38a80c83fd67..d1faece21b6a 100644
--- a/arch/mips/pci/pci-alchemy.c
+++ b/arch/mips/pci/pci-alchemy.c
@@ -19,7 +19,7 @@
19#include <asm/mach-au1x00/au1000.h> 19#include <asm/mach-au1x00/au1000.h>
20#include <asm/tlbmisc.h> 20#include <asm/tlbmisc.h>
21 21
22#ifdef CONFIG_DEBUG_PCI 22#ifdef CONFIG_PCI_DEBUG
23#define DBG(x...) printk(KERN_DEBUG x) 23#define DBG(x...) printk(KERN_DEBUG x)
24#else 24#else
25#define DBG(x...) do {} while (0) 25#define DBG(x...) do {} while (0)
@@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus,
162 if (status & (1 << 29)) { 162 if (status & (1 << 29)) {
163 *data = 0xffffffff; 163 *data = 0xffffffff;
164 error = -1; 164 error = -1;
165 DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d", 165 DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n",
166 access_type, bus->number, device); 166 access_type, bus->number, device);
167 } else if ((status >> 28) & 0xf) { 167 } else if ((status >> 28) & 0xf) {
168 DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n", 168 DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index f3eab8594d9f..d44a571e45a7 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -23,8 +23,10 @@
23#include <asm/code-patching.h> 23#include <asm/code-patching.h>
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25 25
26#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
26extern void epapr_ev_idle(void); 27extern void epapr_ev_idle(void);
27extern u32 epapr_ev_idle_start[]; 28extern u32 epapr_ev_idle_start[];
29#endif
28 30
29bool epapr_paravirt_enabled; 31bool epapr_paravirt_enabled;
30 32
@@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void)
47 49
48 for (i = 0; i < (len / 4); i++) { 50 for (i = 0; i < (len / 4); i++) {
49 patch_instruction(epapr_hypercall_start + i, insts[i]); 51 patch_instruction(epapr_hypercall_start + i, insts[i]);
52#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
50 patch_instruction(epapr_ev_idle_start + i, insts[i]); 53 patch_instruction(epapr_ev_idle_start + i, insts[i]);
54#endif
51 } 55 }
52 56
57#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
53 if (of_get_property(hyper_node, "has-idle", NULL)) 58 if (of_get_property(hyper_node, "has-idle", NULL))
54 ppc_md.power_save = epapr_ev_idle; 59 ppc_md.power_save = epapr_ev_idle;
60#endif
55 61
56 epapr_paravirt_enabled = true; 62 epapr_paravirt_enabled = true;
57 63
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 200afa5bcfb7..56bd92362ce1 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1066,78 +1066,6 @@ unrecov_user_slb:
1066#endif /* __DISABLED__ */ 1066#endif /* __DISABLED__ */
1067 1067
1068 1068
1069/*
1070 * r13 points to the PACA, r9 contains the saved CR,
1071 * r12 contain the saved SRR1, SRR0 is still ready for return
1072 * r3 has the faulting address
1073 * r9 - r13 are saved in paca->exslb.
1074 * r3 is saved in paca->slb_r3
1075 * We assume we aren't going to take any exceptions during this procedure.
1076 */
1077_GLOBAL(slb_miss_realmode)
1078 mflr r10
1079#ifdef CONFIG_RELOCATABLE
1080 mtctr r11
1081#endif
1082
1083 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1084 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1085
1086 bl .slb_allocate_realmode
1087
1088 /* All done -- return from exception. */
1089
1090 ld r10,PACA_EXSLB+EX_LR(r13)
1091 ld r3,PACA_EXSLB+EX_R3(r13)
1092 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1093
1094 mtlr r10
1095
1096 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1097 beq- 2f
1098
1099.machine push
1100.machine "power4"
1101 mtcrf 0x80,r9
1102 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1103.machine pop
1104
1105 RESTORE_PPR_PACA(PACA_EXSLB, r9)
1106 ld r9,PACA_EXSLB+EX_R9(r13)
1107 ld r10,PACA_EXSLB+EX_R10(r13)
1108 ld r11,PACA_EXSLB+EX_R11(r13)
1109 ld r12,PACA_EXSLB+EX_R12(r13)
1110 ld r13,PACA_EXSLB+EX_R13(r13)
1111 rfid
1112 b . /* prevent speculative execution */
1113
11142: mfspr r11,SPRN_SRR0
1115 ld r10,PACAKBASE(r13)
1116 LOAD_HANDLER(r10,unrecov_slb)
1117 mtspr SPRN_SRR0,r10
1118 ld r10,PACAKMSR(r13)
1119 mtspr SPRN_SRR1,r10
1120 rfid
1121 b .
1122
1123unrecov_slb:
1124 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1125 DISABLE_INTS
1126 bl .save_nvgprs
11271: addi r3,r1,STACK_FRAME_OVERHEAD
1128 bl .unrecoverable_exception
1129 b 1b
1130
1131
1132#ifdef CONFIG_PPC_970_NAP
1133power4_fixup_nap:
1134 andc r9,r9,r10
1135 std r9,TI_LOCAL_FLAGS(r11)
1136 ld r10,_LINK(r1) /* make idle task do the */
1137 std r10,_NIP(r1) /* equivalent of a blr */
1138 blr
1139#endif
1140
1141 .align 7 1069 .align 7
1142 .globl alignment_common 1070 .globl alignment_common
1143alignment_common: 1071alignment_common:
@@ -1336,6 +1264,78 @@ _GLOBAL(opal_mc_secondary_handler)
1336 1264
1337 1265
1338/* 1266/*
1267 * r13 points to the PACA, r9 contains the saved CR,
1268 * r12 contain the saved SRR1, SRR0 is still ready for return
1269 * r3 has the faulting address
1270 * r9 - r13 are saved in paca->exslb.
1271 * r3 is saved in paca->slb_r3
1272 * We assume we aren't going to take any exceptions during this procedure.
1273 */
1274_GLOBAL(slb_miss_realmode)
1275 mflr r10
1276#ifdef CONFIG_RELOCATABLE
1277 mtctr r11
1278#endif
1279
1280 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1281 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1282
1283 bl .slb_allocate_realmode
1284
1285 /* All done -- return from exception. */
1286
1287 ld r10,PACA_EXSLB+EX_LR(r13)
1288 ld r3,PACA_EXSLB+EX_R3(r13)
1289 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1290
1291 mtlr r10
1292
1293 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1294 beq- 2f
1295
1296.machine push
1297.machine "power4"
1298 mtcrf 0x80,r9
1299 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1300.machine pop
1301
1302 RESTORE_PPR_PACA(PACA_EXSLB, r9)
1303 ld r9,PACA_EXSLB+EX_R9(r13)
1304 ld r10,PACA_EXSLB+EX_R10(r13)
1305 ld r11,PACA_EXSLB+EX_R11(r13)
1306 ld r12,PACA_EXSLB+EX_R12(r13)
1307 ld r13,PACA_EXSLB+EX_R13(r13)
1308 rfid
1309 b . /* prevent speculative execution */
1310
13112: mfspr r11,SPRN_SRR0
1312 ld r10,PACAKBASE(r13)
1313 LOAD_HANDLER(r10,unrecov_slb)
1314 mtspr SPRN_SRR0,r10
1315 ld r10,PACAKMSR(r13)
1316 mtspr SPRN_SRR1,r10
1317 rfid
1318 b .
1319
1320unrecov_slb:
1321 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1322 DISABLE_INTS
1323 bl .save_nvgprs
13241: addi r3,r1,STACK_FRAME_OVERHEAD
1325 bl .unrecoverable_exception
1326 b 1b
1327
1328
1329#ifdef CONFIG_PPC_970_NAP
1330power4_fixup_nap:
1331 andc r9,r9,r10
1332 std r9,TI_LOCAL_FLAGS(r11)
1333 ld r10,_LINK(r1) /* make idle task do the */
1334 std r10,_NIP(r1) /* equivalent of a blr */
1335 blr
1336#endif
1337
1338/*
1339 * Hash table stuff 1339 * Hash table stuff
1340 */ 1340 */
1341 .align 7 1341 .align 7
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 0da39fed355a..299731e9036b 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
186 (0x1UL << 4), &dummy1, &dummy2); 186 (0x1UL << 4), &dummy1, &dummy2);
187 if (lpar_rc == H_SUCCESS) 187 if (lpar_rc == H_SUCCESS)
188 return i; 188 return i;
189 BUG_ON(lpar_rc != H_NOT_FOUND); 189
190 /*
191 * The test for adjunct partition is performed before the
192 * ANDCOND test. H_RESOURCE may be returned, so we need to
193 * check for that as well.
194 */
195 BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
190 196
191 slot_offset++; 197 slot_offset++;
192 slot_offset &= 0x7; 198 slot_offset &= 0x7;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4a2930844d43..4a5443118cfb 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -344,6 +344,7 @@ extern unsigned long MODULES_END;
344#define _REGION3_ENTRY_CO 0x100 /* change-recording override */ 344#define _REGION3_ENTRY_CO 0x100 /* change-recording override */
345 345
346/* Bits in the segment table entry */ 346/* Bits in the segment table entry */
347#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
347#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ 348#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
348#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ 349#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
349#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ 350#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
@@ -1531,7 +1532,8 @@ extern int s390_enable_sie(void);
1531/* 1532/*
1532 * No page table caches to initialise 1533 * No page table caches to initialise
1533 */ 1534 */
1534#define pgtable_cache_init() do { } while (0) 1535static inline void pgtable_cache_init(void) { }
1536static inline void check_pgt_cache(void) { }
1535 1537
1536#include <asm-generic/pgtable.h> 1538#include <asm-generic/pgtable.h>
1537 1539
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index dff631d34b45..466fb3383960 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to,
77 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address 77 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address
78 * contains the (negative) exception code. 78 * contains the (negative) exception code.
79 */ 79 */
80static __always_inline unsigned long follow_table(struct mm_struct *mm, 80#ifdef CONFIG_64BIT
81 unsigned long addr, int write) 81static unsigned long follow_table(struct mm_struct *mm,
82 unsigned long address, int write)
82{ 83{
83 pgd_t *pgd; 84 unsigned long *table = (unsigned long *)__pa(mm->pgd);
84 pud_t *pud; 85
85 pmd_t *pmd; 86 switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
86 pte_t *ptep; 87 case _ASCE_TYPE_REGION1:
88 table = table + ((address >> 53) & 0x7ff);
89 if (unlikely(*table & _REGION_ENTRY_INV))
90 return -0x39UL;
91 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
92 case _ASCE_TYPE_REGION2:
93 table = table + ((address >> 42) & 0x7ff);
94 if (unlikely(*table & _REGION_ENTRY_INV))
95 return -0x3aUL;
96 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
97 case _ASCE_TYPE_REGION3:
98 table = table + ((address >> 31) & 0x7ff);
99 if (unlikely(*table & _REGION_ENTRY_INV))
100 return -0x3bUL;
101 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
102 case _ASCE_TYPE_SEGMENT:
103 table = table + ((address >> 20) & 0x7ff);
104 if (unlikely(*table & _SEGMENT_ENTRY_INV))
105 return -0x10UL;
106 if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
107 if (write && (*table & _SEGMENT_ENTRY_RO))
108 return -0x04UL;
109 return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
110 (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
111 }
112 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
113 }
114 table = table + ((address >> 12) & 0xff);
115 if (unlikely(*table & _PAGE_INVALID))
116 return -0x11UL;
117 if (write && (*table & _PAGE_RO))
118 return -0x04UL;
119 return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
120}
87 121
88 pgd = pgd_offset(mm, addr); 122#else /* CONFIG_64BIT */
89 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
90 return -0x3aUL;
91 123
92 pud = pud_offset(pgd, addr); 124static unsigned long follow_table(struct mm_struct *mm,
93 if (pud_none(*pud) || unlikely(pud_bad(*pud))) 125 unsigned long address, int write)
94 return -0x3bUL; 126{
127 unsigned long *table = (unsigned long *)__pa(mm->pgd);
95 128
96 pmd = pmd_offset(pud, addr); 129 table = table + ((address >> 20) & 0x7ff);
97 if (pmd_none(*pmd)) 130 if (unlikely(*table & _SEGMENT_ENTRY_INV))
98 return -0x10UL; 131 return -0x10UL;
99 if (pmd_large(*pmd)) { 132 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
100 if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO)) 133 table = table + ((address >> 12) & 0xff);
101 return -0x04UL; 134 if (unlikely(*table & _PAGE_INVALID))
102 return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
103 }
104 if (unlikely(pmd_bad(*pmd)))
105 return -0x10UL;
106
107 ptep = pte_offset_map(pmd, addr);
108 if (!pte_present(*ptep))
109 return -0x11UL; 135 return -0x11UL;
110 if (write && (!pte_write(*ptep) || !pte_dirty(*ptep))) 136 if (write && (*table & _PAGE_RO))
111 return -0x04UL; 137 return -0x04UL;
112 138 return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
113 return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
114} 139}
115 140
141#endif /* CONFIG_64BIT */
142
116static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, 143static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
117 size_t n, int write_user) 144 size_t n, int write_user)
118{ 145{
@@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
197 224
198static size_t clear_user_pt(size_t n, void __user *to) 225static size_t clear_user_pt(size_t n, void __user *to)
199{ 226{
200 void *zpage = &empty_zero_page; 227 void *zpage = (void *) empty_zero_page;
201 long done, size, ret; 228 long done, size, ret;
202 229
203 done = 0; 230 done = 0;
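The rewritten follow_table() above walks the s390 region/segment/page tables by hand, indexing each level with a different slice of the virtual address. A small sketch of just that index arithmetic, using an arbitrary example address (illustrative only; the shift counts and masks are the ones visible in the hunk above):

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x0000123456789abcUL;   /* arbitrary example address */

	printf("region-first  index: %lu\n", (addr >> 53) & 0x7ff);
	printf("region-second index: %lu\n", (addr >> 42) & 0x7ff);
	printf("region-third  index: %lu\n", (addr >> 31) & 0x7ff);
	printf("segment       index: %lu\n", (addr >> 20) & 0x7ff);
	printf("page          index: %lu\n", (addr >> 12) & 0xff);
	return 0;
}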
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 241c0bb60b12..c96f9bbb760d 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -40,7 +40,15 @@
40#include <asm/percpu.h> 40#include <asm/percpu.h>
41#include <arch/spr_def.h> 41#include <arch/spr_def.h>
42 42
43/* Set and clear kernel interrupt masks. */ 43/*
44 * Set and clear kernel interrupt masks.
45 *
46 * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
47 * clobber. We rely on it being equivalent to a compiler barrier in
48 * this code since arch_local_irq_save() and friends must act as
49 * compiler barriers. This compiler semantic is baked into enough
50 * places that the compiler will maintain it going forward.
51 */
44#if CHIP_HAS_SPLIT_INTR_MASK() 52#if CHIP_HAS_SPLIT_INTR_MASK()
45#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 53#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
46# error Fix assumptions about which word various interrupts are in 54# error Fix assumptions about which word various interrupts are in
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index d1e15f7b59c6..7a5aa1a7864e 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot)
1004 1004
1005#ifdef CONFIG_BLK_DEV_INITRD 1005#ifdef CONFIG_BLK_DEV_INITRD
1006 1006
1007/*
1008 * Note that the kernel can potentially support other compression
1009 * techniques than gz, though we don't do so by default. If we ever
1010 * decide to do so we can either look for other filename extensions,
1011 * or just allow a file with this name to be compressed with an
1012 * arbitrary compressor (somewhat counterintuitively).
1013 */
1014static int __initdata set_initramfs_file; 1007static int __initdata set_initramfs_file;
1015static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; 1008static char __initdata initramfs_file[128] = "initramfs";
1016 1009
1017static int __init setup_initramfs_file(char *str) 1010static int __init setup_initramfs_file(char *str)
1018{ 1011{
@@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str)
1026early_param("initramfs_file", setup_initramfs_file); 1019early_param("initramfs_file", setup_initramfs_file);
1027 1020
1028/* 1021/*
1029 * We look for an "initramfs.cpio.gz" file in the hvfs. 1022 * We look for a file called "initramfs" in the hvfs. If there is one, we
1030 * If there is one, we allocate some memory for it and it will be 1023 * allocate some memory for it and it will be unpacked to the initramfs.
1031 * unpacked to the initramfs. 1024 * If it's compressed, the initd code will uncompress it first.
1032 */ 1025 */
1033static void __init load_hv_initrd(void) 1026static void __init load_hv_initrd(void)
1034{ 1027{
@@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void)
1038 1031
1039 fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); 1032 fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
1040 if (fd == HV_ENOENT) { 1033 if (fd == HV_ENOENT) {
1041 if (set_initramfs_file) 1034 if (set_initramfs_file) {
1042 pr_warning("No such hvfs initramfs file '%s'\n", 1035 pr_warning("No such hvfs initramfs file '%s'\n",
1043 initramfs_file); 1036 initramfs_file);
1044 return; 1037 return;
1038 } else {
1039 /* Try old backwards-compatible name. */
1040 fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
1041 if (fd == HV_ENOENT)
1042 return;
1043 }
1045 } 1044 }
1046 BUG_ON(fd < 0); 1045 BUG_ON(fd < 0);
1047 stat = hv_fs_fstat(fd); 1046 stat = hv_fs_fstat(fd);
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 8a84501acb1b..5ef205c5f37b 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -4,7 +4,7 @@
4# create a compressed vmlinux image from the original vmlinux 4# create a compressed vmlinux image from the original vmlinux
5# 5#
6 6
7targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o 7targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo
8 8
9KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 9KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
10KBUILD_CFLAGS += -fno-strict-aliasing -fPIC 10KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -29,7 +29,6 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
29 $(obj)/piggy.o 29 $(obj)/piggy.o
30 30
31$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone 31$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
32$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone
33 32
34ifeq ($(CONFIG_EFI_STUB), y) 33ifeq ($(CONFIG_EFI_STUB), y)
35 VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o 34 VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o
@@ -43,7 +42,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
43$(obj)/vmlinux.bin: vmlinux FORCE 42$(obj)/vmlinux.bin: vmlinux FORCE
44 $(call if_changed,objcopy) 43 $(call if_changed,objcopy)
45 44
46targets += vmlinux.bin.all vmlinux.relocs 45targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs
47 46
48CMD_RELOCS = arch/x86/tools/relocs 47CMD_RELOCS = arch/x86/tools/relocs
49quiet_cmd_relocs = RELOCS $@ 48quiet_cmd_relocs = RELOCS $@
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 5edd1742cfd0..7361e47db79f 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
703 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); 703 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
704} 704}
705 705
706void arch_flush_lazy_mmu_mode(void); 706static inline void arch_flush_lazy_mmu_mode(void)
707{
708 PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
709}
707 710
708static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, 711static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
709 phys_addr_t phys, pgprot_t flags) 712 phys_addr_t phys, pgprot_t flags)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 142236ed83af..b3b0ec1dac86 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -91,6 +91,7 @@ struct pv_lazy_ops {
91 /* Set deferred update mode, used for batching operations. */ 91 /* Set deferred update mode, used for batching operations. */
92 void (*enter)(void); 92 void (*enter)(void);
93 void (*leave)(void); 93 void (*leave)(void);
94 void (*flush)(void);
94}; 95};
95 96
96struct pv_time_ops { 97struct pv_time_ops {
@@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next);
679 680
680void paravirt_enter_lazy_mmu(void); 681void paravirt_enter_lazy_mmu(void);
681void paravirt_leave_lazy_mmu(void); 682void paravirt_leave_lazy_mmu(void);
683void paravirt_flush_lazy_mmu(void);
682 684
683void _paravirt_nop(void); 685void _paravirt_nop(void);
684u32 _paravirt_ident_32(u32); 686u32 _paravirt_ident_32(u32);
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 1ace47b62592..2e188d68397c 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[];
29 */ 29 */
30static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) 30static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
31{ 31{
32 return regs->orig_ax & __SYSCALL_MASK; 32 return regs->orig_ax;
33} 33}
34 34
35static inline void syscall_rollback(struct task_struct *task, 35static inline void syscall_rollback(struct task_struct *task,
36 struct pt_regs *regs) 36 struct pt_regs *regs)
37{ 37{
38 regs->ax = regs->orig_ax & __SYSCALL_MASK; 38 regs->ax = regs->orig_ax;
39} 39}
40 40
41static inline long syscall_get_error(struct task_struct *task, 41static inline long syscall_get_error(struct task_struct *task,
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 4fef20773b8f..c7797307fc2b 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -7,7 +7,7 @@
7 7
8#define tlb_flush(tlb) \ 8#define tlb_flush(tlb) \
9{ \ 9{ \
10 if (tlb->fullmm == 0) \ 10 if (!tlb->fullmm && !tlb->need_flush_all) \
11 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \ 11 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
12 else \ 12 else \
13 flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \ 13 flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index c20d1ce62dc6..e709884d0ef9 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
382 return _hypercall3(int, console_io, cmd, count, str); 382 return _hypercall3(int, console_io, cmd, count, str);
383} 383}
384 384
385extern int __must_check HYPERVISOR_physdev_op_compat(int, void *); 385extern int __must_check xen_physdev_op_compat(int, void *);
386 386
387static inline int 387static inline int
388HYPERVISOR_physdev_op(int cmd, void *arg) 388HYPERVISOR_physdev_op(int cmd, void *arg)
389{ 389{
390 int rc = _hypercall2(int, physdev_op, cmd, arg); 390 int rc = _hypercall2(int, physdev_op, cmd, arg);
391 if (unlikely(rc == -ENOSYS)) 391 if (unlikely(rc == -ENOSYS))
392 rc = HYPERVISOR_physdev_op_compat(cmd, arg); 392 rc = xen_physdev_op_compat(cmd, arg);
393 return rc; 393 return rc;
394} 394}
395 395
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 892ce40a7470..7a060f4b411f 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -44,6 +44,7 @@
44#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) 44#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) 45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
46 46
47#define MSR_PLATFORM_INFO 0x000000ce
47#define MSR_MTRRcap 0x000000fe 48#define MSR_MTRRcap 0x000000fe
48#define MSR_IA32_BBL_CR_CTL 0x00000119 49#define MSR_IA32_BBL_CR_CTL 0x00000119
49#define MSR_IA32_BBL_CR_CTL3 0x0000011e 50#define MSR_IA32_BBL_CR_CTL3 0x0000011e
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index b05a575d56f4..26830f3af0df 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -314,10 +314,11 @@ int intel_pmu_drain_bts_buffer(void)
314 if (top <= at) 314 if (top <= at)
315 return 0; 315 return 0;
316 316
317 memset(&regs, 0, sizeof(regs));
318
317 ds->bts_index = ds->bts_buffer_base; 319 ds->bts_index = ds->bts_buffer_base;
318 320
319 perf_sample_data_init(&data, 0, event->hw.last_period); 321 perf_sample_data_init(&data, 0, event->hw.last_period);
320 regs.ip = 0;
321 322
322 /* 323 /*
323 * Prepare a generic sample, i.e. fill in the invariant fields. 324 * Prepare a generic sample, i.e. fill in the invariant fields.
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index 7890bc838952..d893e8ed8ac9 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
90 struct microcode_intel ***mc_saved; 90 struct microcode_intel ***mc_saved;
91 91
92 mc_saved = (struct microcode_intel ***) 92 mc_saved = (struct microcode_intel ***)
93 __pa_symbol(&mc_saved_data->mc_saved); 93 __pa_nodebug(&mc_saved_data->mc_saved);
94 for (i = 0; i < mc_saved_data->mc_saved_count; i++) { 94 for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
95 struct microcode_intel *p; 95 struct microcode_intel *p;
96 96
97 p = *(struct microcode_intel **) 97 p = *(struct microcode_intel **)
98 __pa(mc_saved_data->mc_saved + i); 98 __pa_nodebug(mc_saved_data->mc_saved + i);
99 mc_saved_tmp[i] = (struct microcode_intel *)__pa(p); 99 mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
100 } 100 }
101} 101}
102#endif 102#endif
@@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end,
562 struct cpio_data cd; 562 struct cpio_data cd;
563 long offset = 0; 563 long offset = 0;
564#ifdef CONFIG_X86_32 564#ifdef CONFIG_X86_32
565 char *p = (char *)__pa_symbol(ucode_name); 565 char *p = (char *)__pa_nodebug(ucode_name);
566#else 566#else
567 char *p = ucode_name; 567 char *p = ucode_name;
568#endif 568#endif
@@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
630 if (mc_intel == NULL) 630 if (mc_intel == NULL)
631 return; 631 return;
632 632
633 delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info); 633 delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
634 current_mc_date_p = (int *)__pa_symbol(&current_mc_date); 634 current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
635 635
636 *delay_ucode_info_p = 1; 636 *delay_ucode_info_p = 1;
637 *current_mc_date_p = mc_intel->hdr.date; 637 *current_mc_date_p = mc_intel->hdr.date;
@@ -659,8 +659,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
659} 659}
660#endif 660#endif
661 661
662static int apply_microcode_early(struct mc_saved_data *mc_saved_data, 662static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data,
663 struct ucode_cpu_info *uci) 663 struct ucode_cpu_info *uci)
664{ 664{
665 struct microcode_intel *mc_intel; 665 struct microcode_intel *mc_intel;
666 unsigned int val[2]; 666 unsigned int val[2];
@@ -741,15 +741,15 @@ load_ucode_intel_bsp(void)
741#ifdef CONFIG_X86_32 741#ifdef CONFIG_X86_32
742 struct boot_params *boot_params_p; 742 struct boot_params *boot_params_p;
743 743
744 boot_params_p = (struct boot_params *)__pa_symbol(&boot_params); 744 boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params);
745 ramdisk_image = boot_params_p->hdr.ramdisk_image; 745 ramdisk_image = boot_params_p->hdr.ramdisk_image;
746 ramdisk_size = boot_params_p->hdr.ramdisk_size; 746 ramdisk_size = boot_params_p->hdr.ramdisk_size;
747 initrd_start_early = ramdisk_image; 747 initrd_start_early = ramdisk_image;
748 initrd_end_early = initrd_start_early + ramdisk_size; 748 initrd_end_early = initrd_start_early + ramdisk_size;
749 749
750 _load_ucode_intel_bsp( 750 _load_ucode_intel_bsp(
751 (struct mc_saved_data *)__pa_symbol(&mc_saved_data), 751 (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
752 (unsigned long *)__pa_symbol(&mc_saved_in_initrd), 752 (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
753 initrd_start_early, initrd_end_early, &uci); 753 initrd_start_early, initrd_end_early, &uci);
754#else 754#else
755 ramdisk_image = boot_params.hdr.ramdisk_image; 755 ramdisk_image = boot_params.hdr.ramdisk_image;
@@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void)
772 unsigned long *initrd_start_p; 772 unsigned long *initrd_start_p;
773 773
774 mc_saved_in_initrd_p = 774 mc_saved_in_initrd_p =
775 (unsigned long *)__pa_symbol(mc_saved_in_initrd); 775 (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
776 mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data); 776 mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
777 initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start); 777 initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
778 initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p); 778 initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
779#else 779#else
780 mc_saved_data_p = &mc_saved_data; 780 mc_saved_data_p = &mc_saved_data;
781 mc_saved_in_initrd_p = mc_saved_in_initrd; 781 mc_saved_in_initrd_p = mc_saved_in_initrd;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 17fff18a1031..8bfb335f74bb 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)
263 leave_lazy(PARAVIRT_LAZY_MMU); 263 leave_lazy(PARAVIRT_LAZY_MMU);
264} 264}
265 265
266void paravirt_flush_lazy_mmu(void)
267{
268 preempt_disable();
269
270 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
271 arch_leave_lazy_mmu_mode();
272 arch_enter_lazy_mmu_mode();
273 }
274
275 preempt_enable();
276}
277
266void paravirt_start_context_switch(struct task_struct *prev) 278void paravirt_start_context_switch(struct task_struct *prev)
267{ 279{
268 BUG_ON(preemptible()); 280 BUG_ON(preemptible());
@@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
292 return this_cpu_read(paravirt_lazy_mode); 304 return this_cpu_read(paravirt_lazy_mode);
293} 305}
294 306
295void arch_flush_lazy_mmu_mode(void)
296{
297 preempt_disable();
298
299 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
300 arch_leave_lazy_mmu_mode();
301 arch_enter_lazy_mmu_mode();
302 }
303
304 preempt_enable();
305}
306
307struct pv_info pv_info = { 307struct pv_info pv_info = {
308 .name = "bare hardware", 308 .name = "bare hardware",
309 .paravirt_enabled = 0, 309 .paravirt_enabled = 0,
@@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {
475 .lazy_mode = { 475 .lazy_mode = {
476 .enter = paravirt_nop, 476 .enter = paravirt_nop,
477 .leave = paravirt_nop, 477 .leave = paravirt_nop,
478 .flush = paravirt_nop,
478 }, 479 },
479 480
480 .set_fixmap = native_set_fixmap, 481 .set_fixmap = native_set_fixmap,
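Taken together, the paravirt hunks above turn arch_flush_lazy_mmu_mode() into a per-backend .flush hook whose default, paravirt_flush_lazy_mmu(), simply leaves and re-enters lazy MMU mode so that any batched updates are pushed out. A toy model of that enter/leave/flush batching pattern, with purely illustrative names and counters standing in for queued page-table updates:

#include <stdio.h>

static int lazy;      /* are we batching? */
static int queued;    /* updates waiting to be applied */
static int applied;   /* updates actually visible */

static void enter_lazy(void) { lazy = 1; }
static void leave_lazy(void) { applied += queued; queued = 0; lazy = 0; }
static void flush_lazy(void) { if (lazy) { leave_lazy(); enter_lazy(); } }  /* leave + re-enter */
static void update(void)     { if (lazy) queued++; else applied++; }

int main(void)
{
	enter_lazy();
	update();                                       /* queued, not yet visible */
	printf("before flush: applied=%d\n", applied);  /* 0 */
	flush_lazy();                                   /* forces the queued update out */
	printf("after flush:  applied=%d\n", applied);  /* 1 */
	leave_lazy();
	return 0;
}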
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 02b51dd4e4ad..f77df1c5de6e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
1857 if (!pv_eoi_enabled(vcpu)) 1857 if (!pv_eoi_enabled(vcpu))
1858 return 0; 1858 return 0;
1859 return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, 1859 return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
1860 addr); 1860 addr, sizeof(u8));
1861} 1861}
1862 1862
1863void kvm_lapic_init(void) 1863void kvm_lapic_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f19ac0aca60d..e1721324c271 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1823,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
1823 return 0; 1823 return 0;
1824 } 1824 }
1825 1825
1826 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) 1826 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
1827 sizeof(u32)))
1827 return 1; 1828 return 1;
1828 1829
1829 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); 1830 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -1952,12 +1953,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1952 1953
1953 gpa_offset = data & ~(PAGE_MASK | 1); 1954 gpa_offset = data & ~(PAGE_MASK | 1);
1954 1955
1955 /* Check that the address is 32-byte aligned. */
1956 if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
1957 break;
1958
1959 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, 1956 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
1960 &vcpu->arch.pv_time, data & ~1ULL)) 1957 &vcpu->arch.pv_time, data & ~1ULL,
1958 sizeof(struct pvclock_vcpu_time_info)))
1961 vcpu->arch.pv_time_enabled = false; 1959 vcpu->arch.pv_time_enabled = false;
1962 else 1960 else
1963 vcpu->arch.pv_time_enabled = true; 1961 vcpu->arch.pv_time_enabled = true;
@@ -1977,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1977 return 1; 1975 return 1;
1978 1976
1979 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, 1977 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
1980 data & KVM_STEAL_VALID_BITS)) 1978 data & KVM_STEAL_VALID_BITS,
1979 sizeof(struct kvm_steal_time)))
1981 return 1; 1980 return 1;
1982 1981
1983 vcpu->arch.st.msr_val = data; 1982 vcpu->arch.st.msr_val = data;
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 1cbd89ca5569..7114c63f047d 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1334,6 +1334,7 @@ __init void lguest_init(void)
1334 pv_mmu_ops.read_cr3 = lguest_read_cr3; 1334 pv_mmu_ops.read_cr3 = lguest_read_cr3;
1335 pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; 1335 pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
1336 pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; 1336 pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
1337 pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
1337 pv_mmu_ops.pte_update = lguest_pte_update; 1338 pv_mmu_ops.pte_update = lguest_pte_update;
1338 pv_mmu_ops.pte_update_defer = lguest_pte_update; 1339 pv_mmu_ops.pte_update_defer = lguest_pte_update;
1339 1340
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 05928aae911e..906fea315791 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
74 char c; 74 char c;
75 unsigned zero_len; 75 unsigned zero_len;
76 76
77 for (; len; --len) { 77 for (; len; --len, to++) {
78 if (__get_user_nocheck(c, from++, sizeof(char))) 78 if (__get_user_nocheck(c, from++, sizeof(char)))
79 break; 79 break;
80 if (__put_user_nocheck(c, to++, sizeof(char))) 80 if (__put_user_nocheck(c, to, sizeof(char)))
81 break; 81 break;
82 } 82 }
83 83
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2b97525246d4..0e883364abb5 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
378 if (pgd_none(*pgd_ref)) 378 if (pgd_none(*pgd_ref))
379 return -1; 379 return -1;
380 380
381 if (pgd_none(*pgd)) 381 if (pgd_none(*pgd)) {
382 set_pgd(pgd, *pgd_ref); 382 set_pgd(pgd, *pgd_ref);
383 else 383 arch_flush_lazy_mmu_mode();
384 } else {
384 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); 385 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
386 }
385 387
386 /* 388 /*
387 * Below here mismatches are bugs because these lower tables 389 * Below here mismatches are bugs because these lower tables
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index b0086567271c..0e38951e65eb 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -68,7 +68,7 @@ static int print_split(struct split_state *s)
68 s->gpg++; 68 s->gpg++;
69 i += GPS/PAGE_SIZE; 69 i += GPS/PAGE_SIZE;
70 } else if (level == PG_LEVEL_2M) { 70 } else if (level == PG_LEVEL_2M) {
71 if (!(pte_val(*pte) & _PAGE_PSE)) { 71 if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
72 printk(KERN_ERR 72 printk(KERN_ERR
73 "%lx level %d but not PSE %Lx\n", 73 "%lx level %d but not PSE %Lx\n",
74 addr, level, (u64)pte_val(*pte)); 74 addr, level, (u64)pte_val(*pte));
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 091934e1d0d9..fb4e73ec24d8 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
467 * We are safe now. Check whether the new pgprot is the same: 467 * We are safe now. Check whether the new pgprot is the same:
468 */ 468 */
469 old_pte = *kpte; 469 old_pte = *kpte;
470 old_prot = new_prot = req_prot = pte_pgprot(old_pte); 470 old_prot = req_prot = pte_pgprot(old_pte);
471 471
472 pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); 472 pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
473 pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); 473 pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
@@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
478 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL 478 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
479 * for the ancient hardware that doesn't support it. 479 * for the ancient hardware that doesn't support it.
480 */ 480 */
481 if (pgprot_val(new_prot) & _PAGE_PRESENT) 481 if (pgprot_val(req_prot) & _PAGE_PRESENT)
482 pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL; 482 pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
483 else 483 else
484 pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); 484 pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
485 485
486 new_prot = canon_pgprot(new_prot); 486 req_prot = canon_pgprot(req_prot);
487 487
488 /* 488 /*
489 * old_pte points to the large page base address. So we need 489 * old_pte points to the large page base address. So we need
@@ -1413,6 +1413,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
1413 * but that can deadlock->flush only current cpu: 1413 * but that can deadlock->flush only current cpu:
1414 */ 1414 */
1415 __flush_tlb_all(); 1415 __flush_tlb_all();
1416
1417 arch_flush_lazy_mmu_mode();
1416} 1418}
1417 1419
1418#ifdef CONFIG_HIBERNATION 1420#ifdef CONFIG_HIBERNATION
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 193350b51f90..17fda6a8b3c2 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
58void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) 58void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
59{ 59{
60 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); 60 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
61 /*
62 * NOTE! For PAE, any changes to the top page-directory-pointer-table
63 * entries need a full cr3 reload to flush.
64 */
65#ifdef CONFIG_X86_PAE
66 tlb->need_flush_all = 1;
67#endif
61 tlb_remove_page(tlb, virt_to_page(pmd)); 68 tlb_remove_page(tlb, virt_to_page(pmd));
62} 69}
63 70
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index e8e34938c57d..e006c18d288a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1467,8 +1467,6 @@ static void __init xen_write_cr3_init(unsigned long cr3)
1467 __xen_write_cr3(true, cr3); 1467 __xen_write_cr3(true, cr3);
1468 1468
1469 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ 1469 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1470
1471 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1472} 1470}
1473#endif 1471#endif
1474 1472
@@ -1750,14 +1748,18 @@ static void *m2v(phys_addr_t maddr)
1750} 1748}
1751 1749
1752/* Set the page permissions on an identity-mapped pages */ 1750/* Set the page permissions on an identity-mapped pages */
1753static void set_page_prot(void *addr, pgprot_t prot) 1751static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
1754{ 1752{
1755 unsigned long pfn = __pa(addr) >> PAGE_SHIFT; 1753 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1756 pte_t pte = pfn_pte(pfn, prot); 1754 pte_t pte = pfn_pte(pfn, prot);
1757 1755
1758 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0)) 1756 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1759 BUG(); 1757 BUG();
1760} 1758}
1759static void set_page_prot(void *addr, pgprot_t prot)
1760{
1761 return set_page_prot_flags(addr, prot, UVMF_NONE);
1762}
1761#ifdef CONFIG_X86_32 1763#ifdef CONFIG_X86_32
1762static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) 1764static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1763{ 1765{
@@ -1841,12 +1843,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1841 unsigned long addr) 1843 unsigned long addr)
1842{ 1844{
1843 if (*pt_base == PFN_DOWN(__pa(addr))) { 1845 if (*pt_base == PFN_DOWN(__pa(addr))) {
1844 set_page_prot((void *)addr, PAGE_KERNEL); 1846 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1845 clear_page((void *)addr); 1847 clear_page((void *)addr);
1846 (*pt_base)++; 1848 (*pt_base)++;
1847 } 1849 }
1848 if (*pt_end == PFN_DOWN(__pa(addr))) { 1850 if (*pt_end == PFN_DOWN(__pa(addr))) {
1849 set_page_prot((void *)addr, PAGE_KERNEL); 1851 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1850 clear_page((void *)addr); 1852 clear_page((void *)addr);
1851 (*pt_end)--; 1853 (*pt_end)--;
1852 } 1854 }
@@ -2122,6 +2124,7 @@ static void __init xen_post_allocator_init(void)
2122#endif 2124#endif
2123 2125
2124#ifdef CONFIG_X86_64 2126#ifdef CONFIG_X86_64
2127 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2125 SetPagePinned(virt_to_page(level3_user_vsyscall)); 2128 SetPagePinned(virt_to_page(level3_user_vsyscall));
2126#endif 2129#endif
2127 xen_mark_init_mm_pinned(); 2130 xen_mark_init_mm_pinned();
@@ -2197,6 +2200,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2197 .lazy_mode = { 2200 .lazy_mode = {
2198 .enter = paravirt_enter_lazy_mmu, 2201 .enter = paravirt_enter_lazy_mmu,
2199 .leave = xen_leave_lazy_mmu, 2202 .leave = xen_leave_lazy_mmu,
2203 .flush = paravirt_flush_lazy_mmu,
2200 }, 2204 },
2201 2205
2202 .set_fixmap = xen_set_fixmap, 2206 .set_fixmap = xen_set_fixmap,
diff --git a/block/blk-flush.c b/block/blk-flush.c
index db8f1b507857..cc2b827a853c 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -444,7 +444,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
444 * copied from blk_rq_pos(rq). 444 * copied from blk_rq_pos(rq).
445 */ 445 */
446 if (error_sector) 446 if (error_sector)
447 *error_sector = bio->bi_sector; 447 *error_sector = bio->bi_sector;
448 448
449 if (!bio_flagged(bio, BIO_UPTODATE)) 449 if (!bio_flagged(bio, BIO_UPTODATE))
450 ret = -EIO; 450 ret = -EIO;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 6206a934eb8c..5efc5a647183 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -229,6 +229,8 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
229 unsigned long val; \ 229 unsigned long val; \
230 ssize_t ret; \ 230 ssize_t ret; \
231 ret = queue_var_store(&val, page, count); \ 231 ret = queue_var_store(&val, page, count); \
232 if (ret < 0) \
233 return ret; \
232 if (neg) \ 234 if (neg) \
233 val = !val; \ 235 val = !val; \
234 \ 236 \
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 137ad1ec5438..13ccbda34ff9 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -44,6 +44,7 @@ struct crypto_rfc4543_ctx {
44 44
45struct crypto_rfc4543_req_ctx { 45struct crypto_rfc4543_req_ctx {
46 u8 auth_tag[16]; 46 u8 auth_tag[16];
47 u8 assocbuf[32];
47 struct scatterlist cipher[1]; 48 struct scatterlist cipher[1];
48 struct scatterlist payload[2]; 49 struct scatterlist payload[2];
49 struct scatterlist assoc[2]; 50 struct scatterlist assoc[2];
@@ -1133,9 +1134,19 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
1133 scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2); 1134 scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
1134 assoclen += 8 + req->cryptlen - (enc ? 0 : authsize); 1135 assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
1135 1136
1136 sg_init_table(assoc, 2); 1137 if (req->assoc->length == req->assoclen) {
1137 sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, 1138 sg_init_table(assoc, 2);
1138 req->assoc->offset); 1139 sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
1140 req->assoc->offset);
1141 } else {
1142 BUG_ON(req->assoclen > sizeof(rctx->assocbuf));
1143
1144 scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0,
1145 req->assoclen, 0);
1146
1147 sg_init_table(assoc, 2);
1148 sg_set_buf(assoc, rctx->assocbuf, req->assoclen);
1149 }
1139 scatterwalk_crypto_chain(assoc, payload, 0, 2); 1150 scatterwalk_crypto_chain(assoc, payload, 0, 2);
1140 1151
1141 aead_request_set_tfm(subreq, ctx->child); 1152 aead_request_set_tfm(subreq, ctx->child);
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 92ed9692c47e..4bf68c8d4797 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -396,7 +396,7 @@ config ACPI_CUSTOM_METHOD
396 396
397config ACPI_BGRT 397config ACPI_BGRT
398 bool "Boottime Graphics Resource Table support" 398 bool "Boottime Graphics Resource Table support"
399 depends on EFI 399 depends on EFI && X86
400 help 400 help
401 This driver adds support for exposing the ACPI Boottime Graphics 401 This driver adds support for exposing the ACPI Boottime Graphics
402 Resource Table, which allows the operating system to obtain 402 Resource Table, which allows the operating system to obtain
diff --git a/drivers/acpi/acpi_i2c.c b/drivers/acpi/acpi_i2c.c
index 82045e3f5cac..a82c7626aa9b 100644
--- a/drivers/acpi/acpi_i2c.c
+++ b/drivers/acpi/acpi_i2c.c
@@ -90,7 +90,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adapter)
90 acpi_handle handle; 90 acpi_handle handle;
91 acpi_status status; 91 acpi_status status;
92 92
93 handle = ACPI_HANDLE(&adapter->dev); 93 handle = ACPI_HANDLE(adapter->dev.parent);
94 if (!handle) 94 if (!handle)
95 return; 95 return;
96 96
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index 1e5d8a40101e..fefc2ca7cc3e 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -405,7 +405,7 @@ int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
405 return rc; 405 return rc;
406 data_len = estatus->data_length; 406 data_len = estatus->data_length;
407 gdata = (struct acpi_hest_generic_data *)(estatus + 1); 407 gdata = (struct acpi_hest_generic_data *)(estatus + 1);
408 while (data_len > sizeof(*gdata)) { 408 while (data_len >= sizeof(*gdata)) {
409 gedata_len = gdata->error_data_length; 409 gedata_len = gdata->error_data_length;
410 if (gedata_len > data_len - sizeof(*gdata)) 410 if (gedata_len > data_len - sizeof(*gdata))
411 return -EINVAL; 411 return -EINVAL;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 0ac546d5e53f..6ae5e440436e 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -415,7 +415,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
415 struct acpi_pci_root *root; 415 struct acpi_pci_root *root;
416 struct acpi_pci_driver *driver; 416 struct acpi_pci_driver *driver;
417 u32 flags, base_flags; 417 u32 flags, base_flags;
418 bool is_osc_granted = false;
419 418
420 root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); 419 root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
421 if (!root) 420 if (!root)
@@ -476,6 +475,30 @@ static int acpi_pci_root_add(struct acpi_device *device,
476 flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; 475 flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
477 acpi_pci_osc_support(root, flags); 476 acpi_pci_osc_support(root, flags);
478 477
478 /*
479 * TBD: Need PCI interface for enumeration/configuration of roots.
480 */
481
482 mutex_lock(&acpi_pci_root_lock);
483 list_add_tail(&root->node, &acpi_pci_roots);
484 mutex_unlock(&acpi_pci_root_lock);
485
486 /*
487 * Scan the Root Bridge
488 * --------------------
489 * Must do this prior to any attempt to bind the root device, as the
490 * PCI namespace does not get created until this call is made (and
491 * thus the root bridge's pci_dev does not exist).
492 */
493 root->bus = pci_acpi_scan_root(root);
494 if (!root->bus) {
495 printk(KERN_ERR PREFIX
496 "Bus %04x:%02x not present in PCI namespace\n",
497 root->segment, (unsigned int)root->secondary.start);
498 result = -ENODEV;
499 goto out_del_root;
500 }
501
479 /* Indicate support for various _OSC capabilities. */ 502 /* Indicate support for various _OSC capabilities. */
480 if (pci_ext_cfg_avail()) 503 if (pci_ext_cfg_avail())
481 flags |= OSC_EXT_PCI_CONFIG_SUPPORT; 504 flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
@@ -494,6 +517,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
494 flags = base_flags; 517 flags = base_flags;
495 } 518 }
496 } 519 }
520
497 if (!pcie_ports_disabled 521 if (!pcie_ports_disabled
498 && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { 522 && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
499 flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 523 flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
@@ -514,54 +538,28 @@ static int acpi_pci_root_add(struct acpi_device *device,
514 status = acpi_pci_osc_control_set(device->handle, &flags, 538 status = acpi_pci_osc_control_set(device->handle, &flags,
515 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); 539 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
516 if (ACPI_SUCCESS(status)) { 540 if (ACPI_SUCCESS(status)) {
517 is_osc_granted = true;
518 dev_info(&device->dev, 541 dev_info(&device->dev,
519 "ACPI _OSC control (0x%02x) granted\n", flags); 542 "ACPI _OSC control (0x%02x) granted\n", flags);
543 if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
544 /*
545 * We have ASPM control, but the FADT indicates
546 * that it's unsupported. Clear it.
547 */
548 pcie_clear_aspm(root->bus);
549 }
520 } else { 550 } else {
521 is_osc_granted = false;
522 dev_info(&device->dev, 551 dev_info(&device->dev,
523 "ACPI _OSC request failed (%s), " 552 "ACPI _OSC request failed (%s), "
524 "returned control mask: 0x%02x\n", 553 "returned control mask: 0x%02x\n",
525 acpi_format_exception(status), flags); 554 acpi_format_exception(status), flags);
555 pr_info("ACPI _OSC control for PCIe not granted, "
556 "disabling ASPM\n");
557 pcie_no_aspm();
526 } 558 }
527 } else { 559 } else {
528 dev_info(&device->dev, 560 dev_info(&device->dev,
529 "Unable to request _OSC control " 561 "Unable to request _OSC control "
530 "(_OSC support mask: 0x%02x)\n", flags); 562 "(_OSC support mask: 0x%02x)\n", flags);
531 }
532
533 /*
534 * TBD: Need PCI interface for enumeration/configuration of roots.
535 */
536
537 mutex_lock(&acpi_pci_root_lock);
538 list_add_tail(&root->node, &acpi_pci_roots);
539 mutex_unlock(&acpi_pci_root_lock);
540
541 /*
542 * Scan the Root Bridge
543 * --------------------
544 * Must do this prior to any attempt to bind the root device, as the
545 * PCI namespace does not get created until this call is made (and
546 * thus the root bridge's pci_dev does not exist).
547 */
548 root->bus = pci_acpi_scan_root(root);
549 if (!root->bus) {
550 printk(KERN_ERR PREFIX
551 "Bus %04x:%02x not present in PCI namespace\n",
552 root->segment, (unsigned int)root->secondary.start);
553 result = -ENODEV;
554 goto out_del_root;
555 }
556
557 /* ASPM setting */
558 if (is_osc_granted) {
559 if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM)
560 pcie_clear_aspm(root->bus);
561 } else {
562 pr_info("ACPI _OSC control for PCIe not granted, "
563 "disabling ASPM\n");
564 pcie_no_aspm();
565 } 563 }
566 564
567 pci_acpi_add_bus_pm_notifier(device, root->bus); 565 pci_acpi_add_bus_pm_notifier(device, root->bus);
@@ -646,6 +644,7 @@ static void handle_root_bridge_insertion(acpi_handle handle)
646 644
647static void handle_root_bridge_removal(struct acpi_device *device) 645static void handle_root_bridge_removal(struct acpi_device *device)
648{ 646{
647 acpi_status status;
649 struct acpi_eject_event *ej_event; 648 struct acpi_eject_event *ej_event;
650 649
651 ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); 650 ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
@@ -661,7 +660,9 @@ static void handle_root_bridge_removal(struct acpi_device *device)
661 ej_event->device = device; 660 ej_event->device = device;
662 ej_event->event = ACPI_NOTIFY_EJECT_REQUEST; 661 ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
663 662
664 acpi_bus_hot_remove_device(ej_event); 663 status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
664 if (ACPI_FAILURE(status))
665 kfree(ej_event);
665} 666}
666 667
667static void _handle_hotplug_event_root(struct work_struct *work) 668static void _handle_hotplug_event_root(struct work_struct *work)
@@ -676,8 +677,9 @@ static void _handle_hotplug_event_root(struct work_struct *work)
676 handle = hp_work->handle; 677 handle = hp_work->handle;
677 type = hp_work->type; 678 type = hp_work->type;
678 679
679 root = acpi_pci_find_root(handle); 680 acpi_scan_lock_acquire();
680 681
682 root = acpi_pci_find_root(handle);
681 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); 683 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
682 684
683 switch (type) { 685 switch (type) {
@@ -711,6 +713,7 @@ static void _handle_hotplug_event_root(struct work_struct *work)
711 break; 713 break;
712 } 714 }
713 715
716 acpi_scan_lock_release();
714 kfree(hp_work); /* allocated in handle_hotplug_event_bridge */ 717 kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
715 kfree(buffer.pointer); 718 kfree(buffer.pointer);
716} 719}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index fc95308e9a11..ee255c60bdac 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -66,7 +66,8 @@ module_param(latency_factor, uint, 0644);
66 66
67static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); 67static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
68 68
69static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX]; 69static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
70 acpi_cstate);
70 71
71static int disabled_by_idle_boot_param(void) 72static int disabled_by_idle_boot_param(void)
72{ 73{
@@ -722,7 +723,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
722 struct cpuidle_driver *drv, int index) 723 struct cpuidle_driver *drv, int index)
723{ 724{
724 struct acpi_processor *pr; 725 struct acpi_processor *pr;
725 struct acpi_processor_cx *cx = acpi_cstate[index]; 726 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
726 727
727 pr = __this_cpu_read(processors); 728 pr = __this_cpu_read(processors);
728 729
@@ -745,7 +746,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
745 */ 746 */
746static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) 747static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
747{ 748{
748 struct acpi_processor_cx *cx = acpi_cstate[index]; 749 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
749 750
750 ACPI_FLUSH_CPU_CACHE(); 751 ACPI_FLUSH_CPU_CACHE();
751 752
@@ -775,7 +776,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
775 struct cpuidle_driver *drv, int index) 776 struct cpuidle_driver *drv, int index)
776{ 777{
777 struct acpi_processor *pr; 778 struct acpi_processor *pr;
778 struct acpi_processor_cx *cx = acpi_cstate[index]; 779 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
779 780
780 pr = __this_cpu_read(processors); 781 pr = __this_cpu_read(processors);
781 782
@@ -833,7 +834,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
833 struct cpuidle_driver *drv, int index) 834 struct cpuidle_driver *drv, int index)
834{ 835{
835 struct acpi_processor *pr; 836 struct acpi_processor *pr;
836 struct acpi_processor_cx *cx = acpi_cstate[index]; 837 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
837 838
838 pr = __this_cpu_read(processors); 839 pr = __this_cpu_read(processors);
839 840
@@ -960,7 +961,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
960 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 961 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
961 continue; 962 continue;
962#endif 963#endif
963 acpi_cstate[count] = cx; 964 per_cpu(acpi_cstate[count], dev->cpu) = cx;
964 965
965 count++; 966 count++;
966 if (count == CPUIDLE_STATE_MAX) 967 if (count == CPUIDLE_STATE_MAX)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 24213033fbae..9c1a435d10e6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -193,6 +193,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
193 }, 193 },
194 { 194 {
195 .callback = init_nvs_nosave, 195 .callback = init_nvs_nosave,
196 .ident = "Sony Vaio VGN-FW21M",
197 .matches = {
198 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
199 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
200 },
201 },
202 {
203 .callback = init_nvs_nosave,
196 .ident = "Sony Vaio VPCEB17FX", 204 .ident = "Sony Vaio VPCEB17FX",
197 .matches = { 205 .matches = {
198 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), 206 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ffdd32d22602..2f48123d74c4 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -150,6 +150,7 @@ enum piix_controller_ids {
150 tolapai_sata, 150 tolapai_sata,
151 piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ 151 piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
152 ich8_sata_snb, 152 ich8_sata_snb,
153 ich8_2port_sata_snb,
153}; 154};
154 155
155struct piix_map_db { 156struct piix_map_db {
@@ -304,7 +305,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
304 /* SATA Controller IDE (Lynx Point) */ 305 /* SATA Controller IDE (Lynx Point) */
305 { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, 306 { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
306 /* SATA Controller IDE (Lynx Point) */ 307 /* SATA Controller IDE (Lynx Point) */
307 { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 308 { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
308 /* SATA Controller IDE (Lynx Point) */ 309 /* SATA Controller IDE (Lynx Point) */
309 { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 310 { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
310 /* SATA Controller IDE (Lynx Point-LP) */ 311 /* SATA Controller IDE (Lynx Point-LP) */
@@ -439,6 +440,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
439 [ich8m_apple_sata] = &ich8m_apple_map_db, 440 [ich8m_apple_sata] = &ich8m_apple_map_db,
440 [tolapai_sata] = &tolapai_map_db, 441 [tolapai_sata] = &tolapai_map_db,
441 [ich8_sata_snb] = &ich8_map_db, 442 [ich8_sata_snb] = &ich8_map_db,
443 [ich8_2port_sata_snb] = &ich8_2port_map_db,
442}; 444};
443 445
444static struct pci_bits piix_enable_bits[] = { 446static struct pci_bits piix_enable_bits[] = {
@@ -1242,6 +1244,16 @@ static struct ata_port_info piix_port_info[] = {
1242 .udma_mask = ATA_UDMA6, 1244 .udma_mask = ATA_UDMA6,
1243 .port_ops = &piix_sata_ops, 1245 .port_ops = &piix_sata_ops,
1244 }, 1246 },
1247
1248 [ich8_2port_sata_snb] =
1249 {
1250 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR
1251 | PIIX_FLAG_PIO16,
1252 .pio_mask = ATA_PIO4,
1253 .mwdma_mask = ATA_MWDMA2,
1254 .udma_mask = ATA_UDMA6,
1255 .port_ops = &piix_sata_ops,
1256 },
1245}; 1257};
1246 1258
1247#define AHCI_PCI_BAR 5 1259#define AHCI_PCI_BAR 5
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 497adea1f0d6..63c743baf920 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2329,7 +2329,7 @@ int ata_dev_configure(struct ata_device *dev)
2329 * from SATA Settings page of Identify Device Data Log. 2329 * from SATA Settings page of Identify Device Data Log.
2330 */ 2330 */
2331 if (ata_id_has_devslp(dev->id)) { 2331 if (ata_id_has_devslp(dev->id)) {
2332 u8 sata_setting[ATA_SECT_SIZE]; 2332 u8 *sata_setting = ap->sector_buf;
2333 int i, j; 2333 int i, j;
2334 2334
2335 dev->flags |= ATA_DFLAG_DEVSLP; 2335 dev->flags |= ATA_DFLAG_DEVSLP;
@@ -2439,6 +2439,9 @@ int ata_dev_configure(struct ata_device *dev)
2439 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2439 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2440 dev->max_sectors); 2440 dev->max_sectors);
2441 2441
2442 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2443 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2444
2442 if (ap->ops->dev_config) 2445 if (ap->ops->dev_config)
2443 ap->ops->dev_config(dev); 2446 ap->ops->dev_config(dev);
2444 2447
@@ -4100,6 +4103,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4100 /* Weird ATAPI devices */ 4103 /* Weird ATAPI devices */
4101 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4104 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4102 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4105 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4106 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4103 4107
4104 /* Devices we expect to fail diagnostics */ 4108 /* Devices we expect to fail diagnostics */
4105 4109
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 318b41358187..ff44787e5a45 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -532,8 +532,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
532 struct scsi_sense_hdr sshdr; 532 struct scsi_sense_hdr sshdr;
533 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, 533 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
534 &sshdr); 534 &sshdr);
535 if (sshdr.sense_key == 0 && 535 if (sshdr.sense_key == RECOVERED_ERROR &&
536 sshdr.asc == 0 && sshdr.ascq == 0) 536 sshdr.asc == 0 && sshdr.ascq == 0x1d)
537 cmd_result &= ~SAM_STAT_CHECK_CONDITION; 537 cmd_result &= ~SAM_STAT_CHECK_CONDITION;
538 } 538 }
539 539
@@ -618,8 +618,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
618 struct scsi_sense_hdr sshdr; 618 struct scsi_sense_hdr sshdr;
619 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, 619 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
620 &sshdr); 620 &sshdr);
621 if (sshdr.sense_key == 0 && 621 if (sshdr.sense_key == RECOVERED_ERROR &&
622 sshdr.asc == 0 && sshdr.ascq == 0) 622 sshdr.asc == 0 && sshdr.ascq == 0x1d)
623 cmd_result &= ~SAM_STAT_CHECK_CONDITION; 623 cmd_result &= ~SAM_STAT_CHECK_CONDITION;
624 } 624 }
625 625
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 5f74587ef258..71671c42ef45 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -46,6 +46,7 @@
46#include "power.h" 46#include "power.h"
47 47
48static DEFINE_MUTEX(dev_pm_qos_mtx); 48static DEFINE_MUTEX(dev_pm_qos_mtx);
49static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
49 50
50static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); 51static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
51 52
@@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
216 struct pm_qos_constraints *c; 217 struct pm_qos_constraints *c;
217 struct pm_qos_flags *f; 218 struct pm_qos_flags *f;
218 219
219 mutex_lock(&dev_pm_qos_mtx); 220 mutex_lock(&dev_pm_qos_sysfs_mtx);
220 221
221 /* 222 /*
222 * If the device's PM QoS resume latency limit or PM QoS flags have been 223 * If the device's PM QoS resume latency limit or PM QoS flags have been
223 * exposed to user space, they have to be hidden at this point. 224 * exposed to user space, they have to be hidden at this point.
224 */ 225 */
226 pm_qos_sysfs_remove_latency(dev);
227 pm_qos_sysfs_remove_flags(dev);
228
229 mutex_lock(&dev_pm_qos_mtx);
230
225 __dev_pm_qos_hide_latency_limit(dev); 231 __dev_pm_qos_hide_latency_limit(dev);
226 __dev_pm_qos_hide_flags(dev); 232 __dev_pm_qos_hide_flags(dev);
227 233
@@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
254 260
255 out: 261 out:
256 mutex_unlock(&dev_pm_qos_mtx); 262 mutex_unlock(&dev_pm_qos_mtx);
263
264 mutex_unlock(&dev_pm_qos_sysfs_mtx);
257} 265}
258 266
259/** 267/**
@@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
558 kfree(req); 566 kfree(req);
559} 567}
560 568
569static void dev_pm_qos_drop_user_request(struct device *dev,
570 enum dev_pm_qos_req_type type)
571{
572 mutex_lock(&dev_pm_qos_mtx);
573 __dev_pm_qos_drop_user_request(dev, type);
574 mutex_unlock(&dev_pm_qos_mtx);
575}
576
561/** 577/**
562 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. 578 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
563 * @dev: Device whose PM QoS latency limit is to be exposed to user space. 579 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
@@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
581 return ret; 597 return ret;
582 } 598 }
583 599
600 mutex_lock(&dev_pm_qos_sysfs_mtx);
601
584 mutex_lock(&dev_pm_qos_mtx); 602 mutex_lock(&dev_pm_qos_mtx);
585 603
586 if (IS_ERR_OR_NULL(dev->power.qos)) 604 if (IS_ERR_OR_NULL(dev->power.qos))
@@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
591 if (ret < 0) { 609 if (ret < 0) {
592 __dev_pm_qos_remove_request(req); 610 __dev_pm_qos_remove_request(req);
593 kfree(req); 611 kfree(req);
612 mutex_unlock(&dev_pm_qos_mtx);
594 goto out; 613 goto out;
595 } 614 }
596
597 dev->power.qos->latency_req = req; 615 dev->power.qos->latency_req = req;
616
617 mutex_unlock(&dev_pm_qos_mtx);
618
598 ret = pm_qos_sysfs_add_latency(dev); 619 ret = pm_qos_sysfs_add_latency(dev);
599 if (ret) 620 if (ret)
600 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); 621 dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
601 622
602 out: 623 out:
603 mutex_unlock(&dev_pm_qos_mtx); 624 mutex_unlock(&dev_pm_qos_sysfs_mtx);
604 return ret; 625 return ret;
605} 626}
606EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); 627EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
607 628
608static void __dev_pm_qos_hide_latency_limit(struct device *dev) 629static void __dev_pm_qos_hide_latency_limit(struct device *dev)
609{ 630{
610 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) { 631 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
611 pm_qos_sysfs_remove_latency(dev);
612 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); 632 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
613 }
614} 633}
615 634
616/** 635/**
@@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev)
619 */ 638 */
620void dev_pm_qos_hide_latency_limit(struct device *dev) 639void dev_pm_qos_hide_latency_limit(struct device *dev)
621{ 640{
641 mutex_lock(&dev_pm_qos_sysfs_mtx);
642
643 pm_qos_sysfs_remove_latency(dev);
644
622 mutex_lock(&dev_pm_qos_mtx); 645 mutex_lock(&dev_pm_qos_mtx);
623 __dev_pm_qos_hide_latency_limit(dev); 646 __dev_pm_qos_hide_latency_limit(dev);
624 mutex_unlock(&dev_pm_qos_mtx); 647 mutex_unlock(&dev_pm_qos_mtx);
648
649 mutex_unlock(&dev_pm_qos_sysfs_mtx);
625} 650}
626EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); 651EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
627 652
@@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
649 } 674 }
650 675
651 pm_runtime_get_sync(dev); 676 pm_runtime_get_sync(dev);
677 mutex_lock(&dev_pm_qos_sysfs_mtx);
678
652 mutex_lock(&dev_pm_qos_mtx); 679 mutex_lock(&dev_pm_qos_mtx);
653 680
654 if (IS_ERR_OR_NULL(dev->power.qos)) 681 if (IS_ERR_OR_NULL(dev->power.qos))
@@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
659 if (ret < 0) { 686 if (ret < 0) {
660 __dev_pm_qos_remove_request(req); 687 __dev_pm_qos_remove_request(req);
661 kfree(req); 688 kfree(req);
689 mutex_unlock(&dev_pm_qos_mtx);
662 goto out; 690 goto out;
663 } 691 }
664
665 dev->power.qos->flags_req = req; 692 dev->power.qos->flags_req = req;
693
694 mutex_unlock(&dev_pm_qos_mtx);
695
666 ret = pm_qos_sysfs_add_flags(dev); 696 ret = pm_qos_sysfs_add_flags(dev);
667 if (ret) 697 if (ret)
668 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 698 dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
669 699
670 out: 700 out:
671 mutex_unlock(&dev_pm_qos_mtx); 701 mutex_unlock(&dev_pm_qos_sysfs_mtx);
672 pm_runtime_put(dev); 702 pm_runtime_put(dev);
673 return ret; 703 return ret;
674} 704}
@@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
676 706
677static void __dev_pm_qos_hide_flags(struct device *dev) 707static void __dev_pm_qos_hide_flags(struct device *dev)
678{ 708{
679 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) { 709 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
680 pm_qos_sysfs_remove_flags(dev);
681 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 710 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
682 }
683} 711}
684 712
685/** 713/**
@@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev)
689void dev_pm_qos_hide_flags(struct device *dev) 717void dev_pm_qos_hide_flags(struct device *dev)
690{ 718{
691 pm_runtime_get_sync(dev); 719 pm_runtime_get_sync(dev);
720 mutex_lock(&dev_pm_qos_sysfs_mtx);
721
722 pm_qos_sysfs_remove_flags(dev);
723
692 mutex_lock(&dev_pm_qos_mtx); 724 mutex_lock(&dev_pm_qos_mtx);
693 __dev_pm_qos_hide_flags(dev); 725 __dev_pm_qos_hide_flags(dev);
694 mutex_unlock(&dev_pm_qos_mtx); 726 mutex_unlock(&dev_pm_qos_mtx);
727
728 mutex_unlock(&dev_pm_qos_sysfs_mtx);
695 pm_runtime_put(dev); 729 pm_runtime_put(dev);
696} 730}
697EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); 731EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
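The qos.c hunks above move pm_qos_sysfs_remove_latency()/pm_qos_sysfs_remove_flags() out from under dev_pm_qos_mtx and serialize the sysfs add/remove paths with a new outer dev_pm_qos_sysfs_mtx. A minimal sketch of that lock ordering (kernel context assumed; the demo_* helpers are hypothetical stand-ins, not the driver's functions):

#include <linux/device.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_sysfs_mtx);  /* outer: serializes sysfs expose/hide */
static DEFINE_MUTEX(demo_data_mtx);   /* inner: protects the request data */

static void demo_sysfs_remove(struct device *dev) { /* stand-in */ }
static void demo_drop_request(struct device *dev) { /* stand-in */ }

static void demo_hide(struct device *dev)
{
        mutex_lock(&demo_sysfs_mtx);

        /* Remove the attribute before taking the data mutex, so sysfs
         * teardown never runs while demo_data_mtx is held. */
        demo_sysfs_remove(dev);

        mutex_lock(&demo_data_mtx);
        demo_drop_request(dev);
        mutex_unlock(&demo_data_mtx);

        mutex_unlock(&demo_sysfs_mtx);
}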
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index e6732cf7c06e..79f4fca9877a 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -398,7 +398,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
398 base = 0; 398 base = 0;
399 399
400 if (max < rbnode->base_reg + rbnode->blklen) 400 if (max < rbnode->base_reg + rbnode->blklen)
401 end = rbnode->base_reg + rbnode->blklen - max; 401 end = max - rbnode->base_reg + 1;
402 else 402 else
403 end = rbnode->blklen; 403 end = rbnode->blklen;
404 404
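A quick numeric check of the corrected bound in the regcache-rbtree hunk above (plain userspace C; the register values are made up for illustration):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int base_reg = 10, blklen = 8; /* block covers registers 10..17 */
        unsigned int max = 13;                  /* sync no further than register 13 */

        unsigned int old_end = base_reg + blklen - max; /* 5: wrong bound */
        unsigned int new_end = max - base_reg + 1;      /* 4: registers 10..13 */

        /* the last register synced with the new bound is exactly 'max' */
        assert(base_reg + new_end - 1 == max);
        printf("old_end=%u new_end=%u\n", old_end, new_end);
        return 0;
}

With the old expression the end index depends on how far max sits past the block, not on where it sits inside it, which is what the one-line fix corrects.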
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 3d2367501fd0..58cfb3232428 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -710,12 +710,12 @@ skip_format_initialization:
710 } 710 }
711 } 711 }
712 712
713 regmap_debugfs_init(map, config->name);
714
713 ret = regcache_init(map, config); 715 ret = regcache_init(map, config);
714 if (ret != 0) 716 if (ret != 0)
715 goto err_range; 717 goto err_range;
716 718
717 regmap_debugfs_init(map, config->name);
718
719 /* Add a devres resource for dev_get_regmap() */ 719 /* Add a devres resource for dev_get_regmap() */
720 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); 720 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
721 if (!m) { 721 if (!m) {
@@ -1036,6 +1036,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
1036 kfree(async->work_buf); 1036 kfree(async->work_buf);
1037 kfree(async); 1037 kfree(async);
1038 } 1038 }
1039
1040 return ret;
1039 } 1041 }
1040 1042
1041 trace_regmap_hw_write_start(map->dev, reg, 1043 trace_regmap_hw_write_start(map->dev, reg,
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 5dc0daed8fac..b81ddfea1da0 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -532,11 +532,11 @@ config BLK_DEV_RBD
532 If unsure, say N. 532 If unsure, say N.
533 533
534config BLK_DEV_RSXX 534config BLK_DEV_RSXX
535 tristate "RamSam PCIe Flash SSD Device Driver" 535 tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver"
536 depends on PCI 536 depends on PCI
537 help 537 help
538 Device driver for IBM's high speed PCIe SSD 538 Device driver for IBM's high speed PCIe SSD
539 storage devices: RamSan-70 and RamSan-80. 539 storage devices: FlashSystem-70 and FlashSystem-80.
540 540
541 To compile this driver as a module, choose M here: the 541 To compile this driver as a module, choose M here: the
542 module will be called rsxx. 542 module will be called rsxx.
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 25ef5c014fca..92b6d7c51e39 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -51,8 +51,9 @@ new_skb(ulong len)
51{ 51{
52 struct sk_buff *skb; 52 struct sk_buff *skb;
53 53
54 skb = alloc_skb(len, GFP_ATOMIC); 54 skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
55 if (skb) { 55 if (skb) {
56 skb_reserve(skb, MAX_HEADER);
56 skb_reset_mac_header(skb); 57 skb_reset_mac_header(skb);
57 skb_reset_network_header(skb); 58 skb_reset_network_header(skb);
58 skb->protocol = __constant_htons(ETH_P_AOE); 59 skb->protocol = __constant_htons(ETH_P_AOE);
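Consolidated for readability, the post-patch shape of the aoe skb allocation above is sketched below: over-allocate by MAX_HEADER and reserve it as headroom so lower layers can prepend headers without a reallocation (kernel context assumed; demo_new_skb is an illustrative name):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *demo_new_skb(unsigned long len)
{
        struct sk_buff *skb;

        skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
        if (skb) {
                skb_reserve(skb, MAX_HEADER);   /* headroom for lower layers */
                skb_reset_mac_header(skb);
                skb_reset_network_header(skb);
        }
        return skb;
}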
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index ade58bc8f3c4..1c1b8e544aa2 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -4206,7 +4206,7 @@ static int cciss_find_cfgtables(ctlr_info_t *h)
4206 if (rc) 4206 if (rc)
4207 return rc; 4207 return rc;
4208 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, 4208 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
4209 cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable)); 4209 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
4210 if (!h->cfgtable) 4210 if (!h->cfgtable)
4211 return -ENOMEM; 4211 return -ENOMEM;
4212 rc = write_driver_ver_to_cfgtable(h->cfgtable); 4212 rc = write_driver_ver_to_cfgtable(h->cfgtable);
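The cciss fix above maps sizeof(*h->cfgtable) bytes rather than sizeof(h->cfgtable), i.e. the size of the configuration table instead of the size of a pointer to it. A trivial userspace illustration of the difference (struct cfg_table is a made-up stand-in for the real layout):

#include <stdio.h>

struct cfg_table {
        unsigned char bytes[1024];      /* stand-in for the controller's CfgTable */
};

int main(void)
{
        struct cfg_table *cfgtable = 0;

        /* sizeof(cfgtable) is the pointer size (typically 8 on 64-bit);
         * sizeof(*cfgtable) is the size of the structure being mapped. */
        printf("sizeof(cfgtable)  = %zu\n", sizeof(cfgtable));
        printf("sizeof(*cfgtable) = %zu\n", sizeof(*cfgtable));
        return 0;
}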
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 747bb2af69dc..dfe758382eaf 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -922,6 +922,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
922 lo->lo_flags |= LO_FLAGS_PARTSCAN; 922 lo->lo_flags |= LO_FLAGS_PARTSCAN;
923 if (lo->lo_flags & LO_FLAGS_PARTSCAN) 923 if (lo->lo_flags & LO_FLAGS_PARTSCAN)
924 ioctl_by_bdev(bdev, BLKRRPART, 0); 924 ioctl_by_bdev(bdev, BLKRRPART, 0);
925
926 /* Grab the block_device to prevent its destruction after we
927 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
928 */
929 bdgrab(bdev);
925 return 0; 930 return 0;
926 931
927out_clr: 932out_clr:
@@ -1031,8 +1036,10 @@ static int loop_clr_fd(struct loop_device *lo)
1031 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); 1036 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
1032 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); 1037 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
1033 memset(lo->lo_file_name, 0, LO_NAME_SIZE); 1038 memset(lo->lo_file_name, 0, LO_NAME_SIZE);
1034 if (bdev) 1039 if (bdev) {
1040 bdput(bdev);
1035 invalidate_bdev(bdev); 1041 invalidate_bdev(bdev);
1042 }
1036 set_capacity(lo->lo_disk, 0); 1043 set_capacity(lo->lo_disk, 0);
1037 loop_sysfs_exit(lo); 1044 loop_sysfs_exit(lo);
1038 if (bdev) { 1045 if (bdev) {
@@ -1623,6 +1630,7 @@ static int loop_add(struct loop_device **l, int i)
1623 goto out_free_dev; 1630 goto out_free_dev;
1624 i = err; 1631 i = err;
1625 1632
1633 err = -ENOMEM;
1626 lo->lo_queue = blk_alloc_queue(GFP_KERNEL); 1634 lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
1627 if (!lo->lo_queue) 1635 if (!lo->lo_queue)
1628 goto out_free_dev; 1636 goto out_free_dev;
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 1788f491e0fb..076ae7f1b781 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -890,8 +890,10 @@ static int mg_probe(struct platform_device *plat_dev)
890 gpio_direction_output(host->rst, 1); 890 gpio_direction_output(host->rst, 1);
891 891
892 /* reset out pin */ 892 /* reset out pin */
893 if (!(prv_data->dev_attr & MG_DEV_MASK)) 893 if (!(prv_data->dev_attr & MG_DEV_MASK)) {
894 err = -EINVAL;
894 goto probe_err_3a; 895 goto probe_err_3a;
896 }
895 897
896 if (prv_data->dev_attr != MG_BOOT_DEV) { 898 if (prv_data->dev_attr != MG_BOOT_DEV) {
897 rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO, 899 rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
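The loop.c (err = -ENOMEM) and mg_disk.c (err = -EINVAL) hunks above fix the same class of bug: jumping to a cleanup label while the return variable still holds zero, so the caller sees success on a failed probe. A minimal userspace sketch of the idiom the fixes restore (demo_probe and the malloc stand-in are illustrative):

#include <errno.h>
#include <stdlib.h>

static int demo_probe(void)
{
        int err = 0;
        void *queue;

        /* set the error code before the call, so the goto path reports it */
        err = -ENOMEM;
        queue = malloc(4096);   /* stand-in for blk_alloc_queue() */
        if (!queue)
                goto out;       /* err already holds -ENOMEM, not 0 */

        free(queue);
        return 0;

out:
        return err;
}

int main(void) { return demo_probe() ? 1 : 0; }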
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 11cc9522cdd4..32c678028e53 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -81,12 +81,17 @@
81/* Device instance number, incremented each time a device is probed. */ 81/* Device instance number, incremented each time a device is probed. */
82static int instance; 82static int instance;
83 83
84struct list_head online_list;
85struct list_head removing_list;
86spinlock_t dev_lock;
87
84/* 88/*
85 * Global variable used to hold the major block device number 89 * Global variable used to hold the major block device number
86 * allocated in mtip_init(). 90 * allocated in mtip_init().
87 */ 91 */
88static int mtip_major; 92static int mtip_major;
89static struct dentry *dfs_parent; 93static struct dentry *dfs_parent;
94static struct dentry *dfs_device_status;
90 95
91static u32 cpu_use[NR_CPUS]; 96static u32 cpu_use[NR_CPUS];
92 97
@@ -243,40 +248,31 @@ static inline void release_slot(struct mtip_port *port, int tag)
243/* 248/*
244 * Reset the HBA (without sleeping) 249 * Reset the HBA (without sleeping)
245 * 250 *
246 * Just like hba_reset, except does not call sleep, so can be
247 * run from interrupt/tasklet context.
248 *
249 * @dd Pointer to the driver data structure. 251 * @dd Pointer to the driver data structure.
250 * 252 *
251 * return value 253 * return value
252 * 0 The reset was successful. 254 * 0 The reset was successful.
253 * -1 The HBA Reset bit did not clear. 255 * -1 The HBA Reset bit did not clear.
254 */ 256 */
255static int hba_reset_nosleep(struct driver_data *dd) 257static int mtip_hba_reset(struct driver_data *dd)
256{ 258{
257 unsigned long timeout; 259 unsigned long timeout;
258 260
259 /* Chip quirk: quiesce any chip function */
260 mdelay(10);
261
262 /* Set the reset bit */ 261 /* Set the reset bit */
263 writel(HOST_RESET, dd->mmio + HOST_CTL); 262 writel(HOST_RESET, dd->mmio + HOST_CTL);
264 263
265 /* Flush */ 264 /* Flush */
266 readl(dd->mmio + HOST_CTL); 265 readl(dd->mmio + HOST_CTL);
267 266
268 /* 267 /* Spin for up to 2 seconds, waiting for reset acknowledgement */
269 * Wait 10ms then spin for up to 1 second 268 timeout = jiffies + msecs_to_jiffies(2000);
270 * waiting for reset acknowledgement 269 do {
271 */ 270 mdelay(10);
272 timeout = jiffies + msecs_to_jiffies(1000); 271 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
273 mdelay(10); 272 return -1;
274 while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
275 && time_before(jiffies, timeout))
276 mdelay(1);
277 273
278 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) 274 } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
279 return -1; 275 && time_before(jiffies, timeout));
280 276
281 if (readl(dd->mmio + HOST_CTL) & HOST_RESET) 277 if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
282 return -1; 278 return -1;
@@ -481,7 +477,7 @@ static void mtip_restart_port(struct mtip_port *port)
481 dev_warn(&port->dd->pdev->dev, 477 dev_warn(&port->dd->pdev->dev,
482 "PxCMD.CR not clear, escalating reset\n"); 478 "PxCMD.CR not clear, escalating reset\n");
483 479
484 if (hba_reset_nosleep(port->dd)) 480 if (mtip_hba_reset(port->dd))
485 dev_err(&port->dd->pdev->dev, 481 dev_err(&port->dd->pdev->dev,
486 "HBA reset escalation failed.\n"); 482 "HBA reset escalation failed.\n");
487 483
@@ -527,6 +523,26 @@ static void mtip_restart_port(struct mtip_port *port)
527 523
528} 524}
529 525
526static int mtip_device_reset(struct driver_data *dd)
527{
528 int rv = 0;
529
530 if (mtip_check_surprise_removal(dd->pdev))
531 return 0;
532
533 if (mtip_hba_reset(dd) < 0)
534 rv = -EFAULT;
535
536 mdelay(1);
537 mtip_init_port(dd->port);
538 mtip_start_port(dd->port);
539
540 /* Enable interrupts on the HBA. */
541 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
542 dd->mmio + HOST_CTL);
543 return rv;
544}
545
530/* 546/*
531 * Helper function for tag logging 547 * Helper function for tag logging
532 */ 548 */
@@ -632,7 +648,7 @@ static void mtip_timeout_function(unsigned long int data)
632 if (cmdto_cnt) { 648 if (cmdto_cnt) {
633 print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); 649 print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
634 if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { 650 if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
635 mtip_restart_port(port); 651 mtip_device_reset(port->dd);
636 wake_up_interruptible(&port->svc_wait); 652 wake_up_interruptible(&port->svc_wait);
637 } 653 }
638 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); 654 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
@@ -1283,11 +1299,11 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1283 int rv = 0, ready2go = 1; 1299 int rv = 0, ready2go = 1;
1284 struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL]; 1300 struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
1285 unsigned long to; 1301 unsigned long to;
1302 struct driver_data *dd = port->dd;
1286 1303
1287 /* Make sure the buffer is 8 byte aligned. This is asic specific. */ 1304 /* Make sure the buffer is 8 byte aligned. This is asic specific. */
1288 if (buffer & 0x00000007) { 1305 if (buffer & 0x00000007) {
1289 dev_err(&port->dd->pdev->dev, 1306 dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
1290 "SG buffer is not 8 byte aligned\n");
1291 return -EFAULT; 1307 return -EFAULT;
1292 } 1308 }
1293 1309
@@ -1300,23 +1316,21 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1300 mdelay(100); 1316 mdelay(100);
1301 } while (time_before(jiffies, to)); 1317 } while (time_before(jiffies, to));
1302 if (!ready2go) { 1318 if (!ready2go) {
1303 dev_warn(&port->dd->pdev->dev, 1319 dev_warn(&dd->pdev->dev,
1304 "Internal cmd active. new cmd [%02X]\n", fis->command); 1320 "Internal cmd active. new cmd [%02X]\n", fis->command);
1305 return -EBUSY; 1321 return -EBUSY;
1306 } 1322 }
1307 set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); 1323 set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
1308 port->ic_pause_timer = 0; 1324 port->ic_pause_timer = 0;
1309 1325
1310 if (fis->command == ATA_CMD_SEC_ERASE_UNIT) 1326 clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
1311 clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); 1327 clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
1312 else if (fis->command == ATA_CMD_DOWNLOAD_MICRO)
1313 clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
1314 1328
1315 if (atomic == GFP_KERNEL) { 1329 if (atomic == GFP_KERNEL) {
1316 if (fis->command != ATA_CMD_STANDBYNOW1) { 1330 if (fis->command != ATA_CMD_STANDBYNOW1) {
1317 /* wait for io to complete if non atomic */ 1331 /* wait for io to complete if non atomic */
1318 if (mtip_quiesce_io(port, 5000) < 0) { 1332 if (mtip_quiesce_io(port, 5000) < 0) {
1319 dev_warn(&port->dd->pdev->dev, 1333 dev_warn(&dd->pdev->dev,
1320 "Failed to quiesce IO\n"); 1334 "Failed to quiesce IO\n");
1321 release_slot(port, MTIP_TAG_INTERNAL); 1335 release_slot(port, MTIP_TAG_INTERNAL);
1322 clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); 1336 clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -1361,58 +1375,84 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1361 /* Issue the command to the hardware */ 1375 /* Issue the command to the hardware */
1362 mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL); 1376 mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
1363 1377
1364 /* Poll if atomic, wait_for_completion otherwise */
1365 if (atomic == GFP_KERNEL) { 1378 if (atomic == GFP_KERNEL) {
1366 /* Wait for the command to complete or timeout. */ 1379 /* Wait for the command to complete or timeout. */
1367 if (wait_for_completion_timeout( 1380 if (wait_for_completion_interruptible_timeout(
1368 &wait, 1381 &wait,
1369 msecs_to_jiffies(timeout)) == 0) { 1382 msecs_to_jiffies(timeout)) <= 0) {
1370 dev_err(&port->dd->pdev->dev, 1383 if (rv == -ERESTARTSYS) { /* interrupted */
1371 "Internal command did not complete [%d] " 1384 dev_err(&dd->pdev->dev,
1372 "within timeout of %lu ms\n", 1385 "Internal command [%02X] was interrupted after %lu ms\n",
1373 atomic, timeout); 1386 fis->command, timeout);
1374 if (mtip_check_surprise_removal(port->dd->pdev) || 1387 rv = -EINTR;
1388 goto exec_ic_exit;
1389 } else if (rv == 0) /* timeout */
1390 dev_err(&dd->pdev->dev,
1391 "Internal command did not complete [%02X] within timeout of %lu ms\n",
1392 fis->command, timeout);
1393 else
1394 dev_err(&dd->pdev->dev,
1395 "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
1396 fis->command, rv, timeout);
1397
1398 if (mtip_check_surprise_removal(dd->pdev) ||
1375 test_bit(MTIP_DDF_REMOVE_PENDING_BIT, 1399 test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1376 &port->dd->dd_flag)) { 1400 &dd->dd_flag)) {
1401 dev_err(&dd->pdev->dev,
1402 "Internal command [%02X] wait returned due to SR\n",
1403 fis->command);
1377 rv = -ENXIO; 1404 rv = -ENXIO;
1378 goto exec_ic_exit; 1405 goto exec_ic_exit;
1379 } 1406 }
1407 mtip_device_reset(dd); /* recover from timeout issue */
1380 rv = -EAGAIN; 1408 rv = -EAGAIN;
1409 goto exec_ic_exit;
1381 } 1410 }
1382 } else { 1411 } else {
1412 u32 hba_stat, port_stat;
1413
1383 /* Spin for <timeout> checking if command still outstanding */ 1414 /* Spin for <timeout> checking if command still outstanding */
1384 timeout = jiffies + msecs_to_jiffies(timeout); 1415 timeout = jiffies + msecs_to_jiffies(timeout);
1385 while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL]) 1416 while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1386 & (1 << MTIP_TAG_INTERNAL)) 1417 & (1 << MTIP_TAG_INTERNAL))
1387 && time_before(jiffies, timeout)) { 1418 && time_before(jiffies, timeout)) {
1388 if (mtip_check_surprise_removal(port->dd->pdev)) { 1419 if (mtip_check_surprise_removal(dd->pdev)) {
1389 rv = -ENXIO; 1420 rv = -ENXIO;
1390 goto exec_ic_exit; 1421 goto exec_ic_exit;
1391 } 1422 }
1392 if ((fis->command != ATA_CMD_STANDBYNOW1) && 1423 if ((fis->command != ATA_CMD_STANDBYNOW1) &&
1393 test_bit(MTIP_DDF_REMOVE_PENDING_BIT, 1424 test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1394 &port->dd->dd_flag)) { 1425 &dd->dd_flag)) {
1395 rv = -ENXIO; 1426 rv = -ENXIO;
1396 goto exec_ic_exit; 1427 goto exec_ic_exit;
1397 } 1428 }
1398 if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) { 1429 port_stat = readl(port->mmio + PORT_IRQ_STAT);
1399 atomic_inc(&int_cmd->active); /* error */ 1430 if (!port_stat)
1400 break; 1431 continue;
1432
1433 if (port_stat & PORT_IRQ_ERR) {
1434 dev_err(&dd->pdev->dev,
1435 "Internal command [%02X] failed\n",
1436 fis->command);
1437 mtip_device_reset(dd);
1438 rv = -EIO;
1439 goto exec_ic_exit;
1440 } else {
1441 writel(port_stat, port->mmio + PORT_IRQ_STAT);
1442 hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
1443 if (hba_stat)
1444 writel(hba_stat,
1445 dd->mmio + HOST_IRQ_STAT);
1401 } 1446 }
1447 break;
1402 } 1448 }
1403 } 1449 }
1404 1450
1405 if (atomic_read(&int_cmd->active) > 1) {
1406 dev_err(&port->dd->pdev->dev,
1407 "Internal command [%02X] failed\n", fis->command);
1408 rv = -EIO;
1409 }
1410 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) 1451 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1411 & (1 << MTIP_TAG_INTERNAL)) { 1452 & (1 << MTIP_TAG_INTERNAL)) {
1412 rv = -ENXIO; 1453 rv = -ENXIO;
1413 if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, 1454 if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
1414 &port->dd->dd_flag)) { 1455 mtip_device_reset(dd);
1415 mtip_restart_port(port);
1416 rv = -EAGAIN; 1456 rv = -EAGAIN;
1417 } 1457 }
1418 } 1458 }
@@ -1724,7 +1764,8 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
1724 * -EINVAL Invalid parameters passed in, trim not supported 1764 * -EINVAL Invalid parameters passed in, trim not supported
1725 * -EIO Error submitting trim request to hw 1765 * -EIO Error submitting trim request to hw
1726 */ 1766 */
1727static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len) 1767static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
1768 unsigned int len)
1728{ 1769{
1729 int i, rv = 0; 1770 int i, rv = 0;
1730 u64 tlba, tlen, sect_left; 1771 u64 tlba, tlen, sect_left;
@@ -1811,45 +1852,6 @@ static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
1811} 1852}
1812 1853
1813/* 1854/*
1814 * Reset the HBA.
1815 *
1816 * Resets the HBA by setting the HBA Reset bit in the Global
1817 * HBA Control register. After setting the HBA Reset bit the
1818 * function waits for 1 second before reading the HBA Reset
1819 * bit to make sure it has cleared. If HBA Reset is not clear
1820 * an error is returned. Cannot be used in non-blockable
1821 * context.
1822 *
1823 * @dd Pointer to the driver data structure.
1824 *
1825 * return value
1826 * 0 The reset was successful.
1827 * -1 The HBA Reset bit did not clear.
1828 */
1829static int mtip_hba_reset(struct driver_data *dd)
1830{
1831 mtip_deinit_port(dd->port);
1832
1833 /* Set the reset bit */
1834 writel(HOST_RESET, dd->mmio + HOST_CTL);
1835
1836 /* Flush */
1837 readl(dd->mmio + HOST_CTL);
1838
1839 /* Wait for reset to clear */
1840 ssleep(1);
1841
1842 /* Check the bit has cleared */
1843 if (readl(dd->mmio + HOST_CTL) & HOST_RESET) {
1844 dev_err(&dd->pdev->dev,
1845 "Reset bit did not clear.\n");
1846 return -1;
1847 }
1848
1849 return 0;
1850}
1851
1852/*
1853 * Display the identify command data. 1855 * Display the identify command data.
1854 * 1856 *
1855 * @port Pointer to the port data structure. 1857 * @port Pointer to the port data structure.
@@ -2710,6 +2712,100 @@ static ssize_t mtip_hw_show_status(struct device *dev,
2710 2712
2711static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); 2713static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
2712 2714
 2715/* debugfs entries */
2716
2717static ssize_t show_device_status(struct device_driver *drv, char *buf)
2718{
2719 int size = 0;
2720 struct driver_data *dd, *tmp;
2721 unsigned long flags;
2722 char id_buf[42];
2723 u16 status = 0;
2724
2725 spin_lock_irqsave(&dev_lock, flags);
2726 size += sprintf(&buf[size], "Devices Present:\n");
2727 list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
2728 if (dd->pdev) {
2729 if (dd->port &&
2730 dd->port->identify &&
2731 dd->port->identify_valid) {
2732 strlcpy(id_buf,
2733 (char *) (dd->port->identify + 10), 21);
2734 status = *(dd->port->identify + 141);
2735 } else {
2736 memset(id_buf, 0, 42);
2737 status = 0;
2738 }
2739
2740 if (dd->port &&
2741 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
2742 size += sprintf(&buf[size],
2743 " device %s %s (ftl rebuild %d %%)\n",
2744 dev_name(&dd->pdev->dev),
2745 id_buf,
2746 status);
2747 } else {
2748 size += sprintf(&buf[size],
2749 " device %s %s\n",
2750 dev_name(&dd->pdev->dev),
2751 id_buf);
2752 }
2753 }
2754 }
2755
2756 size += sprintf(&buf[size], "Devices Being Removed:\n");
2757 list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
2758 if (dd->pdev) {
2759 if (dd->port &&
2760 dd->port->identify &&
2761 dd->port->identify_valid) {
2762 strlcpy(id_buf,
2763 (char *) (dd->port->identify+10), 21);
2764 status = *(dd->port->identify + 141);
2765 } else {
2766 memset(id_buf, 0, 42);
2767 status = 0;
2768 }
2769
2770 if (dd->port &&
2771 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
2772 size += sprintf(&buf[size],
2773 " device %s %s (ftl rebuild %d %%)\n",
2774 dev_name(&dd->pdev->dev),
2775 id_buf,
2776 status);
2777 } else {
2778 size += sprintf(&buf[size],
2779 " device %s %s\n",
2780 dev_name(&dd->pdev->dev),
2781 id_buf);
2782 }
2783 }
2784 }
2785 spin_unlock_irqrestore(&dev_lock, flags);
2786
2787 return size;
2788}
2789
2790static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
2791 size_t len, loff_t *offset)
2792{
2793 int size = *offset;
2794 char buf[MTIP_DFS_MAX_BUF_SIZE];
2795
2796 if (!len || *offset)
2797 return 0;
2798
2799 size += show_device_status(NULL, buf);
2800
2801 *offset = size <= len ? size : len;
2802 size = copy_to_user(ubuf, buf, *offset);
2803 if (size)
2804 return -EFAULT;
2805
2806 return *offset;
2807}
2808
2713static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, 2809static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
2714 size_t len, loff_t *offset) 2810 size_t len, loff_t *offset)
2715{ 2811{
@@ -2804,6 +2900,13 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
2804 return *offset; 2900 return *offset;
2805} 2901}
2806 2902
2903static const struct file_operations mtip_device_status_fops = {
2904 .owner = THIS_MODULE,
2905 .open = simple_open,
2906 .read = mtip_hw_read_device_status,
2907 .llseek = no_llseek,
2908};
2909
2807static const struct file_operations mtip_regs_fops = { 2910static const struct file_operations mtip_regs_fops = {
2808 .owner = THIS_MODULE, 2911 .owner = THIS_MODULE,
2809 .open = simple_open, 2912 .open = simple_open,
@@ -4161,6 +4264,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
4161 const struct cpumask *node_mask; 4264 const struct cpumask *node_mask;
4162 int cpu, i = 0, j = 0; 4265 int cpu, i = 0, j = 0;
4163 int my_node = NUMA_NO_NODE; 4266 int my_node = NUMA_NO_NODE;
4267 unsigned long flags;
4164 4268
4165 /* Allocate memory for this devices private data. */ 4269 /* Allocate memory for this devices private data. */
4166 my_node = pcibus_to_node(pdev->bus); 4270 my_node = pcibus_to_node(pdev->bus);
@@ -4218,12 +4322,16 @@ static int mtip_pci_probe(struct pci_dev *pdev,
4218 dd->pdev = pdev; 4322 dd->pdev = pdev;
4219 dd->numa_node = my_node; 4323 dd->numa_node = my_node;
4220 4324
4325 INIT_LIST_HEAD(&dd->online_list);
4326 INIT_LIST_HEAD(&dd->remove_list);
4327
4221 memset(dd->workq_name, 0, 32); 4328 memset(dd->workq_name, 0, 32);
4222 snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); 4329 snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
4223 4330
4224 dd->isr_workq = create_workqueue(dd->workq_name); 4331 dd->isr_workq = create_workqueue(dd->workq_name);
4225 if (!dd->isr_workq) { 4332 if (!dd->isr_workq) {
4226 dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance); 4333 dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
4334 rv = -ENOMEM;
4227 goto block_initialize_err; 4335 goto block_initialize_err;
4228 } 4336 }
4229 4337
@@ -4282,7 +4390,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
4282 INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7); 4390 INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
4283 4391
4284 pci_set_master(pdev); 4392 pci_set_master(pdev);
4285 if (pci_enable_msi(pdev)) { 4393 rv = pci_enable_msi(pdev);
4394 if (rv) {
4286 dev_warn(&pdev->dev, 4395 dev_warn(&pdev->dev,
4287 "Unable to enable MSI interrupt.\n"); 4396 "Unable to enable MSI interrupt.\n");
4288 goto block_initialize_err; 4397 goto block_initialize_err;
@@ -4303,6 +4412,14 @@ static int mtip_pci_probe(struct pci_dev *pdev,
4303 instance++; 4412 instance++;
4304 if (rv != MTIP_FTL_REBUILD_MAGIC) 4413 if (rv != MTIP_FTL_REBUILD_MAGIC)
4305 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); 4414 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
4415 else
4416 rv = 0; /* device in rebuild state, return 0 from probe */
4417
4418 /* Add to online list even if in ftl rebuild */
4419 spin_lock_irqsave(&dev_lock, flags);
4420 list_add(&dd->online_list, &online_list);
4421 spin_unlock_irqrestore(&dev_lock, flags);
4422
4306 goto done; 4423 goto done;
4307 4424
4308block_initialize_err: 4425block_initialize_err:
@@ -4336,9 +4453,15 @@ static void mtip_pci_remove(struct pci_dev *pdev)
4336{ 4453{
4337 struct driver_data *dd = pci_get_drvdata(pdev); 4454 struct driver_data *dd = pci_get_drvdata(pdev);
4338 int counter = 0; 4455 int counter = 0;
4456 unsigned long flags;
4339 4457
4340 set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); 4458 set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
4341 4459
4460 spin_lock_irqsave(&dev_lock, flags);
4461 list_del_init(&dd->online_list);
4462 list_add(&dd->remove_list, &removing_list);
4463 spin_unlock_irqrestore(&dev_lock, flags);
4464
4342 if (mtip_check_surprise_removal(pdev)) { 4465 if (mtip_check_surprise_removal(pdev)) {
4343 while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { 4466 while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
4344 counter++; 4467 counter++;
@@ -4364,6 +4487,10 @@ static void mtip_pci_remove(struct pci_dev *pdev)
4364 4487
4365 pci_disable_msi(pdev); 4488 pci_disable_msi(pdev);
4366 4489
4490 spin_lock_irqsave(&dev_lock, flags);
4491 list_del_init(&dd->remove_list);
4492 spin_unlock_irqrestore(&dev_lock, flags);
4493
4367 kfree(dd); 4494 kfree(dd);
4368 pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); 4495 pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
4369} 4496}
@@ -4511,6 +4638,11 @@ static int __init mtip_init(void)
4511 4638
4512 pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); 4639 pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
4513 4640
4641 spin_lock_init(&dev_lock);
4642
4643 INIT_LIST_HEAD(&online_list);
4644 INIT_LIST_HEAD(&removing_list);
4645
4514 /* Allocate a major block device number to use with this driver. */ 4646 /* Allocate a major block device number to use with this driver. */
4515 error = register_blkdev(0, MTIP_DRV_NAME); 4647 error = register_blkdev(0, MTIP_DRV_NAME);
4516 if (error <= 0) { 4648 if (error <= 0) {
@@ -4520,11 +4652,18 @@ static int __init mtip_init(void)
4520 } 4652 }
4521 mtip_major = error; 4653 mtip_major = error;
4522 4654
4523 if (!dfs_parent) { 4655 dfs_parent = debugfs_create_dir("rssd", NULL);
4524 dfs_parent = debugfs_create_dir("rssd", NULL); 4656 if (IS_ERR_OR_NULL(dfs_parent)) {
4525 if (IS_ERR_OR_NULL(dfs_parent)) { 4657 pr_warn("Error creating debugfs parent\n");
4526 pr_warn("Error creating debugfs parent\n"); 4658 dfs_parent = NULL;
4527 dfs_parent = NULL; 4659 }
4660 if (dfs_parent) {
4661 dfs_device_status = debugfs_create_file("device_status",
4662 S_IRUGO, dfs_parent, NULL,
4663 &mtip_device_status_fops);
4664 if (IS_ERR_OR_NULL(dfs_device_status)) {
4665 pr_err("Error creating device_status node\n");
4666 dfs_device_status = NULL;
4528 } 4667 }
4529 } 4668 }
4530 4669
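The mtip32xx hunks above add a driver-wide debugfs node at rssd/device_status backed by mtip_device_status_fops. A minimal sketch of that registration pattern (kernel module context assumed; the demo_* names are illustrative, and the read callback here uses simple_read_from_buffer rather than the driver's hand-rolled copy):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>

static struct dentry *demo_dir;

static ssize_t demo_read(struct file *f, char __user *ubuf,
                         size_t len, loff_t *offset)
{
        static const char msg[] = "ok\n";

        return simple_read_from_buffer(ubuf, len, offset, msg, sizeof(msg) - 1);
}

static const struct file_operations demo_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .read   = demo_read,
        .llseek = no_llseek,
};

static int __init demo_init(void)
{
        demo_dir = debugfs_create_dir("rssd_demo", NULL);
        if (!IS_ERR_OR_NULL(demo_dir))
                debugfs_create_file("device_status", S_IRUGO, demo_dir,
                                    NULL, &demo_fops);
        return 0;
}

static void __exit demo_exit(void)
{
        debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");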
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 3bffff5f670c..8e8334c9dd0f 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -129,9 +129,9 @@ enum {
129 MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */ 129 MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
130 MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */ 130 MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
131 MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcode */ 131 MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcode */
132 MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \ 132 MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) |
133 (1 << MTIP_PF_EH_ACTIVE_BIT) | \ 133 (1 << MTIP_PF_EH_ACTIVE_BIT) |
134 (1 << MTIP_PF_SE_ACTIVE_BIT) | \ 134 (1 << MTIP_PF_SE_ACTIVE_BIT) |
135 (1 << MTIP_PF_DM_ACTIVE_BIT)), 135 (1 << MTIP_PF_DM_ACTIVE_BIT)),
136 136
137 MTIP_PF_SVC_THD_ACTIVE_BIT = 4, 137 MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
@@ -144,9 +144,9 @@ enum {
144 MTIP_DDF_REMOVE_PENDING_BIT = 1, 144 MTIP_DDF_REMOVE_PENDING_BIT = 1,
145 MTIP_DDF_OVER_TEMP_BIT = 2, 145 MTIP_DDF_OVER_TEMP_BIT = 2,
146 MTIP_DDF_WRITE_PROTECT_BIT = 3, 146 MTIP_DDF_WRITE_PROTECT_BIT = 3,
147 MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \ 147 MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
148 (1 << MTIP_DDF_SEC_LOCK_BIT) | \ 148 (1 << MTIP_DDF_SEC_LOCK_BIT) |
149 (1 << MTIP_DDF_OVER_TEMP_BIT) | \ 149 (1 << MTIP_DDF_OVER_TEMP_BIT) |
150 (1 << MTIP_DDF_WRITE_PROTECT_BIT)), 150 (1 << MTIP_DDF_WRITE_PROTECT_BIT)),
151 151
152 MTIP_DDF_CLEANUP_BIT = 5, 152 MTIP_DDF_CLEANUP_BIT = 5,
@@ -180,7 +180,7 @@ struct mtip_work {
180 180
181#define MTIP_TRIM_TIMEOUT_MS 240000 181#define MTIP_TRIM_TIMEOUT_MS 240000
182#define MTIP_MAX_TRIM_ENTRIES 8 182#define MTIP_MAX_TRIM_ENTRIES 8
183#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8 183#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8
184 184
185struct mtip_trim_entry { 185struct mtip_trim_entry {
186 u32 lba; /* starting lba of region */ 186 u32 lba; /* starting lba of region */
@@ -501,6 +501,10 @@ struct driver_data {
501 atomic_t irq_workers_active; 501 atomic_t irq_workers_active;
502 502
503 int isr_binding; 503 int isr_binding;
504
505 struct list_head online_list; /* linkage for online list */
506
507 struct list_head remove_list; /* linkage for removing list */
504}; 508};
505 509
506#endif 510#endif
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 6c81a4c040b9..f556f8a8b3f9 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1264,6 +1264,32 @@ static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1264 return atomic_read(&obj_request->done) != 0; 1264 return atomic_read(&obj_request->done) != 0;
1265} 1265}
1266 1266
1267static void
1268rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1269{
1270 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1271 obj_request, obj_request->img_request, obj_request->result,
1272 obj_request->xferred, obj_request->length);
1273 /*
1274 * ENOENT means a hole in the image. We zero-fill the
1275 * entire length of the request. A short read also implies
1276 * zero-fill to the end of the request. Either way we
1277 * update the xferred count to indicate the whole request
1278 * was satisfied.
1279 */
1280 BUG_ON(obj_request->type != OBJ_REQUEST_BIO);
1281 if (obj_request->result == -ENOENT) {
1282 zero_bio_chain(obj_request->bio_list, 0);
1283 obj_request->result = 0;
1284 obj_request->xferred = obj_request->length;
1285 } else if (obj_request->xferred < obj_request->length &&
1286 !obj_request->result) {
1287 zero_bio_chain(obj_request->bio_list, obj_request->xferred);
1288 obj_request->xferred = obj_request->length;
1289 }
1290 obj_request_done_set(obj_request);
1291}
1292
1267static void rbd_obj_request_complete(struct rbd_obj_request *obj_request) 1293static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1268{ 1294{
1269 dout("%s: obj %p cb %p\n", __func__, obj_request, 1295 dout("%s: obj %p cb %p\n", __func__, obj_request,
@@ -1284,23 +1310,10 @@ static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
1284{ 1310{
1285 dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request, 1311 dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
1286 obj_request->result, obj_request->xferred, obj_request->length); 1312 obj_request->result, obj_request->xferred, obj_request->length);
1287 /* 1313 if (obj_request->img_request)
1288 * ENOENT means a hole in the object. We zero-fill the 1314 rbd_img_obj_request_read_callback(obj_request);
1289 * entire length of the request. A short read also implies 1315 else
1290 * zero-fill to the end of the request. Either way we 1316 obj_request_done_set(obj_request);
1291 * update the xferred count to indicate the whole request
1292 * was satisfied.
1293 */
1294 if (obj_request->result == -ENOENT) {
1295 zero_bio_chain(obj_request->bio_list, 0);
1296 obj_request->result = 0;
1297 obj_request->xferred = obj_request->length;
1298 } else if (obj_request->xferred < obj_request->length &&
1299 !obj_request->result) {
1300 zero_bio_chain(obj_request->bio_list, obj_request->xferred);
1301 obj_request->xferred = obj_request->length;
1302 }
1303 obj_request_done_set(obj_request);
1304} 1317}
1305 1318
1306static void rbd_osd_write_callback(struct rbd_obj_request *obj_request) 1319static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
diff --git a/drivers/block/rsxx/Makefile b/drivers/block/rsxx/Makefile
index f35cd0b71f7b..b1c53c0aa450 100644
--- a/drivers/block/rsxx/Makefile
+++ b/drivers/block/rsxx/Makefile
@@ -1,2 +1,2 @@
1obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o 1obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
2rsxx-y := config.o core.o cregs.o dev.o dma.o 2rsxx-objs := config.o core.o cregs.o dev.o dma.o
diff --git a/drivers/block/rsxx/config.c b/drivers/block/rsxx/config.c
index a295e7e9ee41..10cd530d3e10 100644
--- a/drivers/block/rsxx/config.c
+++ b/drivers/block/rsxx/config.c
@@ -29,15 +29,13 @@
29#include "rsxx_priv.h" 29#include "rsxx_priv.h"
30#include "rsxx_cfg.h" 30#include "rsxx_cfg.h"
31 31
32static void initialize_config(void *config) 32static void initialize_config(struct rsxx_card_cfg *cfg)
33{ 33{
34 struct rsxx_card_cfg *cfg = config;
35
36 cfg->hdr.version = RSXX_CFG_VERSION; 34 cfg->hdr.version = RSXX_CFG_VERSION;
37 35
38 cfg->data.block_size = RSXX_HW_BLK_SIZE; 36 cfg->data.block_size = RSXX_HW_BLK_SIZE;
39 cfg->data.stripe_size = RSXX_HW_BLK_SIZE; 37 cfg->data.stripe_size = RSXX_HW_BLK_SIZE;
40 cfg->data.vendor_id = RSXX_VENDOR_ID_TMS_IBM; 38 cfg->data.vendor_id = RSXX_VENDOR_ID_IBM;
41 cfg->data.cache_order = (-1); 39 cfg->data.cache_order = (-1);
42 cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED; 40 cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED;
43 cfg->data.intr_coal.count = 0; 41 cfg->data.intr_coal.count = 0;
@@ -181,7 +179,7 @@ int rsxx_load_config(struct rsxx_cardinfo *card)
181 } else { 179 } else {
182 dev_info(CARD_TO_DEV(card), 180 dev_info(CARD_TO_DEV(card),
183 "Initializing card configuration.\n"); 181 "Initializing card configuration.\n");
184 initialize_config(card); 182 initialize_config(&card->config);
185 st = rsxx_save_config(card); 183 st = rsxx_save_config(card);
186 if (st) 184 if (st)
187 return st; 185 return st;
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index e5162487686a..5af21f2db29c 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -30,6 +30,7 @@
30#include <linux/reboot.h> 30#include <linux/reboot.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/bitops.h> 32#include <linux/bitops.h>
33#include <linux/delay.h>
33 34
34#include <linux/genhd.h> 35#include <linux/genhd.h>
35#include <linux/idr.h> 36#include <linux/idr.h>
@@ -39,8 +40,8 @@
39 40
40#define NO_LEGACY 0 41#define NO_LEGACY 0
41 42
42MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver"); 43MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver");
43MODULE_AUTHOR("IBM <support@ramsan.com>"); 44MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
44MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
45MODULE_VERSION(DRIVER_VERSION); 46MODULE_VERSION(DRIVER_VERSION);
46 47
@@ -52,6 +53,13 @@ static DEFINE_IDA(rsxx_disk_ida);
52static DEFINE_SPINLOCK(rsxx_ida_lock); 53static DEFINE_SPINLOCK(rsxx_ida_lock);
53 54
54/*----------------- Interrupt Control & Handling -------------------*/ 55/*----------------- Interrupt Control & Handling -------------------*/
56
57static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
58{
59 card->isr_mask = 0;
60 card->ier_mask = 0;
61}
62
55static void __enable_intr(unsigned int *mask, unsigned int intr) 63static void __enable_intr(unsigned int *mask, unsigned int intr)
56{ 64{
57 *mask |= intr; 65 *mask |= intr;
@@ -71,7 +79,8 @@ static void __disable_intr(unsigned int *mask, unsigned int intr)
71 */ 79 */
72void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr) 80void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
73{ 81{
74 if (unlikely(card->halt)) 82 if (unlikely(card->halt) ||
83 unlikely(card->eeh_state))
75 return; 84 return;
76 85
77 __enable_intr(&card->ier_mask, intr); 86 __enable_intr(&card->ier_mask, intr);
@@ -80,6 +89,9 @@ void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
80 89
81void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr) 90void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
82{ 91{
92 if (unlikely(card->eeh_state))
93 return;
94
83 __disable_intr(&card->ier_mask, intr); 95 __disable_intr(&card->ier_mask, intr);
84 iowrite32(card->ier_mask, card->regmap + IER); 96 iowrite32(card->ier_mask, card->regmap + IER);
85} 97}
@@ -87,7 +99,8 @@ void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
87void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card, 99void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
88 unsigned int intr) 100 unsigned int intr)
89{ 101{
90 if (unlikely(card->halt)) 102 if (unlikely(card->halt) ||
103 unlikely(card->eeh_state))
91 return; 104 return;
92 105
93 __enable_intr(&card->isr_mask, intr); 106 __enable_intr(&card->isr_mask, intr);
@@ -97,6 +110,9 @@ void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
97void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card, 110void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
98 unsigned int intr) 111 unsigned int intr)
99{ 112{
113 if (unlikely(card->eeh_state))
114 return;
115
100 __disable_intr(&card->isr_mask, intr); 116 __disable_intr(&card->isr_mask, intr);
101 __disable_intr(&card->ier_mask, intr); 117 __disable_intr(&card->ier_mask, intr);
102 iowrite32(card->ier_mask, card->regmap + IER); 118 iowrite32(card->ier_mask, card->regmap + IER);
@@ -115,6 +131,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
115 do { 131 do {
116 reread_isr = 0; 132 reread_isr = 0;
117 133
134 if (unlikely(card->eeh_state))
135 break;
136
118 isr = ioread32(card->regmap + ISR); 137 isr = ioread32(card->regmap + ISR);
119 if (isr == 0xffffffff) { 138 if (isr == 0xffffffff) {
120 /* 139 /*
@@ -161,9 +180,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
161} 180}
162 181
163/*----------------- Card Event Handler -------------------*/ 182/*----------------- Card Event Handler -------------------*/
164static char *rsxx_card_state_to_str(unsigned int state) 183static const char * const rsxx_card_state_to_str(unsigned int state)
165{ 184{
166 static char *state_strings[] = { 185 static const char * const state_strings[] = {
167 "Unknown", "Shutdown", "Starting", "Formatting", 186 "Unknown", "Shutdown", "Starting", "Formatting",
168 "Uninitialized", "Good", "Shutting Down", 187 "Uninitialized", "Good", "Shutting Down",
169 "Fault", "Read Only Fault", "dStroying" 188 "Fault", "Read Only Fault", "dStroying"
@@ -304,6 +323,192 @@ static int card_shutdown(struct rsxx_cardinfo *card)
304 return 0; 323 return 0;
305} 324}
306 325
326static int rsxx_eeh_frozen(struct pci_dev *dev)
327{
328 struct rsxx_cardinfo *card = pci_get_drvdata(dev);
329 int i;
330 int st;
331
332 dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n");
333
334 card->eeh_state = 1;
335 rsxx_mask_interrupts(card);
336
337 /*
338 * We need to guarantee that the write for eeh_state and masking
339 * interrupts does not become reordered. This will prevent a possible
340 * race condition with the EEH code.
341 */
342 wmb();
343
344 pci_disable_device(dev);
345
346 st = rsxx_eeh_save_issued_dmas(card);
347 if (st)
348 return st;
349
350 rsxx_eeh_save_issued_creg(card);
351
352 for (i = 0; i < card->n_targets; i++) {
353 if (card->ctrl[i].status.buf)
354 pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
355 card->ctrl[i].status.buf,
356 card->ctrl[i].status.dma_addr);
357 if (card->ctrl[i].cmd.buf)
358 pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
359 card->ctrl[i].cmd.buf,
360 card->ctrl[i].cmd.dma_addr);
361 }
362
363 return 0;
364}
365
366static void rsxx_eeh_failure(struct pci_dev *dev)
367{
368 struct rsxx_cardinfo *card = pci_get_drvdata(dev);
369 int i;
370
371 dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n");
372
373 card->eeh_state = 1;
374
375 for (i = 0; i < card->n_targets; i++)
376 del_timer_sync(&card->ctrl[i].activity_timer);
377
378 rsxx_eeh_cancel_dmas(card);
379}
380
381static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
382{
383 unsigned int status;
384 int iter = 0;
385
386 /* We need to wait for the hardware to reset */
387 while (iter++ < 10) {
388 status = ioread32(card->regmap + PCI_RECONFIG);
389
390 if (status & RSXX_FLUSH_BUSY) {
391 ssleep(1);
392 continue;
393 }
394
395 if (status & RSXX_FLUSH_TIMEOUT)
396 dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
397 return 0;
398 }
399
400 /* Hardware failed resetting itself. */
401 return -1;
402}
403
404static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
405 enum pci_channel_state error)
406{
407 int st;
408
409 if (dev->revision < RSXX_EEH_SUPPORT)
410 return PCI_ERS_RESULT_NONE;
411
412 if (error == pci_channel_io_perm_failure) {
413 rsxx_eeh_failure(dev);
414 return PCI_ERS_RESULT_DISCONNECT;
415 }
416
417 st = rsxx_eeh_frozen(dev);
418 if (st) {
419 dev_err(&dev->dev, "Slot reset setup failed\n");
420 rsxx_eeh_failure(dev);
421 return PCI_ERS_RESULT_DISCONNECT;
422 }
423
424 return PCI_ERS_RESULT_NEED_RESET;
425}
426
427static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
428{
429 struct rsxx_cardinfo *card = pci_get_drvdata(dev);
430 unsigned long flags;
431 int i;
432 int st;
433
434 dev_warn(&dev->dev,
435 "IBM FlashSystem PCI: recovering from slot reset.\n");
436
437 st = pci_enable_device(dev);
438 if (st)
439 goto failed_hw_setup;
440
441 pci_set_master(dev);
442
443 st = rsxx_eeh_fifo_flush_poll(card);
444 if (st)
445 goto failed_hw_setup;
446
447 rsxx_dma_queue_reset(card);
448
449 for (i = 0; i < card->n_targets; i++) {
450 st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
451 if (st)
452 goto failed_hw_buffers_init;
453 }
454
455 if (card->config_valid)
456 rsxx_dma_configure(card);
457
458 /* Clears the ISR register from spurious interrupts */
459 st = ioread32(card->regmap + ISR);
460
461 card->eeh_state = 0;
462
463 st = rsxx_eeh_remap_dmas(card);
464 if (st)
465 goto failed_remap_dmas;
466
467 spin_lock_irqsave(&card->irq_lock, flags);
468 if (card->n_targets & RSXX_MAX_TARGETS)
469 rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
470 else
471 rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
472 spin_unlock_irqrestore(&card->irq_lock, flags);
473
474 rsxx_kick_creg_queue(card);
475
476 for (i = 0; i < card->n_targets; i++) {
477 spin_lock(&card->ctrl[i].queue_lock);
478 if (list_empty(&card->ctrl[i].queue)) {
479 spin_unlock(&card->ctrl[i].queue_lock);
480 continue;
481 }
482 spin_unlock(&card->ctrl[i].queue_lock);
483
484 queue_work(card->ctrl[i].issue_wq,
485 &card->ctrl[i].issue_dma_work);
486 }
487
488 dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n");
489
490 return PCI_ERS_RESULT_RECOVERED;
491
492failed_hw_buffers_init:
493failed_remap_dmas:
494 for (i = 0; i < card->n_targets; i++) {
495 if (card->ctrl[i].status.buf)
496 pci_free_consistent(card->dev,
497 STATUS_BUFFER_SIZE8,
498 card->ctrl[i].status.buf,
499 card->ctrl[i].status.dma_addr);
500 if (card->ctrl[i].cmd.buf)
501 pci_free_consistent(card->dev,
502 COMMAND_BUFFER_SIZE8,
503 card->ctrl[i].cmd.buf,
504 card->ctrl[i].cmd.dma_addr);
505 }
506failed_hw_setup:
507 rsxx_eeh_failure(dev);
508 return PCI_ERS_RESULT_DISCONNECT;
509
510}
511
307/*----------------- Driver Initialization & Setup -------------------*/ 512/*----------------- Driver Initialization & Setup -------------------*/
308/* Returns: 0 if the driver is compatible with the device 513/* Returns: 0 if the driver is compatible with the device
309 -1 if the driver is NOT compatible with the device */ 514 -1 if the driver is NOT compatible with the device */
@@ -383,6 +588,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
383 588
384 spin_lock_init(&card->irq_lock); 589 spin_lock_init(&card->irq_lock);
385 card->halt = 0; 590 card->halt = 0;
591 card->eeh_state = 0;
386 592
387 spin_lock_irq(&card->irq_lock); 593 spin_lock_irq(&card->irq_lock);
388 rsxx_disable_ier_and_isr(card, CR_INTR_ALL); 594 rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
@@ -538,9 +744,6 @@ static void rsxx_pci_remove(struct pci_dev *dev)
538 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT); 744 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
539 spin_unlock_irqrestore(&card->irq_lock, flags); 745 spin_unlock_irqrestore(&card->irq_lock, flags);
540 746
541 /* Prevent work_structs from re-queuing themselves. */
542 card->halt = 1;
543
544 cancel_work_sync(&card->event_work); 747 cancel_work_sync(&card->event_work);
545 748
546 rsxx_destroy_dev(card); 749 rsxx_destroy_dev(card);
@@ -549,6 +752,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
549 spin_lock_irqsave(&card->irq_lock, flags); 752 spin_lock_irqsave(&card->irq_lock, flags);
550 rsxx_disable_ier_and_isr(card, CR_INTR_ALL); 753 rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
551 spin_unlock_irqrestore(&card->irq_lock, flags); 754 spin_unlock_irqrestore(&card->irq_lock, flags);
755
756 /* Prevent work_structs from re-queuing themselves. */
757 card->halt = 1;
758
552 free_irq(dev->irq, card); 759 free_irq(dev->irq, card);
553 760
554 if (!force_legacy) 761 if (!force_legacy)
@@ -592,11 +799,14 @@ static void rsxx_pci_shutdown(struct pci_dev *dev)
592 card_shutdown(card); 799 card_shutdown(card);
593} 800}
594 801
802static const struct pci_error_handlers rsxx_err_handler = {
803 .error_detected = rsxx_error_detected,
804 .slot_reset = rsxx_slot_reset,
805};
806
595static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = { 807static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
596 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)}, 808 {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
597 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)}, 809 {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
598 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
599 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
600 {0,}, 810 {0,},
601}; 811};
602 812
@@ -609,6 +819,7 @@ static struct pci_driver rsxx_pci_driver = {
609 .remove = rsxx_pci_remove, 819 .remove = rsxx_pci_remove,
610 .suspend = rsxx_pci_suspend, 820 .suspend = rsxx_pci_suspend,
611 .shutdown = rsxx_pci_shutdown, 821 .shutdown = rsxx_pci_shutdown,
822 .err_handler = &rsxx_err_handler,
612}; 823};
613 824
614static int __init rsxx_core_init(void) 825static int __init rsxx_core_init(void)
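
The new rsxx_err_handler above wires the driver into the PCI error-recovery (EEH/AER) sequence: rsxx_error_detected() saves issued work and returns NEED_RESET for a frozen slot, or DISCONNECT for a permanent failure, and rsxx_slot_reset() re-enables the device, reinitializes the hardware buffers and requeues the saved work. A minimal userspace sketch of that two-phase flow follows; all names are illustrative stand-ins, not the kernel API.

/* Minimal sketch of the two-phase recovery flow modeled on
 * rsxx_error_detected()/rsxx_slot_reset(); names are illustrative. */
#include <stdio.h>

enum chan_state { CHAN_FROZEN, CHAN_PERM_FAILURE };
enum ers_result { ERS_NEED_RESET, ERS_DISCONNECT, ERS_RECOVERED };

struct fake_card { int eeh_state; int saved_cmds; int hw_ok; };

static enum ers_result error_detected(struct fake_card *c, enum chan_state s)
{
	if (s == CHAN_PERM_FAILURE)
		return ERS_DISCONNECT;   /* give up, cancel everything */
	c->eeh_state = 1;                /* stop issuing new work */
	c->saved_cmds = 3;               /* pretend 3 issued cmds were pulled back */
	return ERS_NEED_RESET;           /* ask the core for a slot reset */
}

static enum ers_result slot_reset(struct fake_card *c)
{
	if (!c->hw_ok)
		return ERS_DISCONNECT;   /* hardware never came back */
	c->eeh_state = 0;                /* allow the issue paths to run again */
	printf("requeueing %d saved commands\n", c->saved_cmds);
	c->saved_cmds = 0;
	return ERS_RECOVERED;
}

int main(void)
{
	struct fake_card card = { .hw_ok = 1 };

	if (error_detected(&card, CHAN_FROZEN) == ERS_NEED_RESET)
		printf("slot reset -> %s\n",
		       slot_reset(&card) == ERS_RECOVERED ? "recovered"
							  : "disconnect");
	return 0;
}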
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
index 80bbe639fccd..4b5c020a0a65 100644
--- a/drivers/block/rsxx/cregs.c
+++ b/drivers/block/rsxx/cregs.c
@@ -58,7 +58,7 @@ static struct kmem_cache *creg_cmd_pool;
58#error Unknown endianess!!! Aborting... 58#error Unknown endianess!!! Aborting...
59#endif 59#endif
60 60
61static void copy_to_creg_data(struct rsxx_cardinfo *card, 61static int copy_to_creg_data(struct rsxx_cardinfo *card,
62 int cnt8, 62 int cnt8,
63 void *buf, 63 void *buf,
64 unsigned int stream) 64 unsigned int stream)
@@ -66,6 +66,9 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
66 int i = 0; 66 int i = 0;
67 u32 *data = buf; 67 u32 *data = buf;
68 68
69 if (unlikely(card->eeh_state))
70 return -EIO;
71
69 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) { 72 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
70 /* 73 /*
71 * Firmware implementation makes it necessary to byte swap on 74 * Firmware implementation makes it necessary to byte swap on
@@ -76,10 +79,12 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
76 else 79 else
77 iowrite32(data[i], card->regmap + CREG_DATA(i)); 80 iowrite32(data[i], card->regmap + CREG_DATA(i));
78 } 81 }
82
83 return 0;
79} 84}
80 85
81 86
82static void copy_from_creg_data(struct rsxx_cardinfo *card, 87static int copy_from_creg_data(struct rsxx_cardinfo *card,
83 int cnt8, 88 int cnt8,
84 void *buf, 89 void *buf,
85 unsigned int stream) 90 unsigned int stream)
@@ -87,6 +92,9 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
87 int i = 0; 92 int i = 0;
88 u32 *data = buf; 93 u32 *data = buf;
89 94
95 if (unlikely(card->eeh_state))
96 return -EIO;
97
90 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) { 98 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
91 /* 99 /*
92 * Firmware implementation makes it necessary to byte swap on 100 * Firmware implementation makes it necessary to byte swap on
@@ -97,41 +105,31 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
97 else 105 else
98 data[i] = ioread32(card->regmap + CREG_DATA(i)); 106 data[i] = ioread32(card->regmap + CREG_DATA(i));
99 } 107 }
100}
101
102static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
103{
104 struct creg_cmd *cmd;
105 108
106 /* 109 return 0;
107 * Spin lock is needed because this can be called in atomic/interrupt
108 * context.
109 */
110 spin_lock_bh(&card->creg_ctrl.lock);
111 cmd = card->creg_ctrl.active_cmd;
112 card->creg_ctrl.active_cmd = NULL;
113 spin_unlock_bh(&card->creg_ctrl.lock);
114
115 return cmd;
116} 110}
117 111
118static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd) 112static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
119{ 113{
114 int st;
115
116 if (unlikely(card->eeh_state))
117 return;
118
120 iowrite32(cmd->addr, card->regmap + CREG_ADD); 119 iowrite32(cmd->addr, card->regmap + CREG_ADD);
121 iowrite32(cmd->cnt8, card->regmap + CREG_CNT); 120 iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
122 121
123 if (cmd->op == CREG_OP_WRITE) { 122 if (cmd->op == CREG_OP_WRITE) {
124 if (cmd->buf) 123 if (cmd->buf) {
125 copy_to_creg_data(card, cmd->cnt8, 124 st = copy_to_creg_data(card, cmd->cnt8,
126 cmd->buf, cmd->stream); 125 cmd->buf, cmd->stream);
126 if (st)
127 return;
128 }
127 } 129 }
128 130
129 /* 131 if (unlikely(card->eeh_state))
130 * Data copy must complete before initiating the command. This is 132 return;
131 * needed for weakly ordered processors (i.e. PowerPC), so that all
132 * neccessary registers are written before we kick the hardware.
133 */
134 wmb();
135 133
136 /* Setting the valid bit will kick off the command. */ 134 /* Setting the valid bit will kick off the command. */
137 iowrite32(cmd->op, card->regmap + CREG_CMD); 135 iowrite32(cmd->op, card->regmap + CREG_CMD);
@@ -196,11 +194,11 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card,
196 cmd->cb_private = cb_private; 194 cmd->cb_private = cb_private;
197 cmd->status = 0; 195 cmd->status = 0;
198 196
199 spin_lock(&card->creg_ctrl.lock); 197 spin_lock_bh(&card->creg_ctrl.lock);
200 list_add_tail(&cmd->list, &card->creg_ctrl.queue); 198 list_add_tail(&cmd->list, &card->creg_ctrl.queue);
201 card->creg_ctrl.q_depth++; 199 card->creg_ctrl.q_depth++;
202 creg_kick_queue(card); 200 creg_kick_queue(card);
203 spin_unlock(&card->creg_ctrl.lock); 201 spin_unlock_bh(&card->creg_ctrl.lock);
204 202
205 return 0; 203 return 0;
206} 204}
@@ -210,7 +208,11 @@ static void creg_cmd_timed_out(unsigned long data)
210 struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data; 208 struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
211 struct creg_cmd *cmd; 209 struct creg_cmd *cmd;
212 210
213 cmd = pop_active_cmd(card); 211 spin_lock(&card->creg_ctrl.lock);
212 cmd = card->creg_ctrl.active_cmd;
213 card->creg_ctrl.active_cmd = NULL;
214 spin_unlock(&card->creg_ctrl.lock);
215
214 if (cmd == NULL) { 216 if (cmd == NULL) {
215 card->creg_ctrl.creg_stats.creg_timeout++; 217 card->creg_ctrl.creg_stats.creg_timeout++;
216 dev_warn(CARD_TO_DEV(card), 218 dev_warn(CARD_TO_DEV(card),
@@ -247,7 +249,11 @@ static void creg_cmd_done(struct work_struct *work)
247 if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0) 249 if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
248 card->creg_ctrl.creg_stats.failed_cancel_timer++; 250 card->creg_ctrl.creg_stats.failed_cancel_timer++;
249 251
250 cmd = pop_active_cmd(card); 252 spin_lock_bh(&card->creg_ctrl.lock);
253 cmd = card->creg_ctrl.active_cmd;
254 card->creg_ctrl.active_cmd = NULL;
255 spin_unlock_bh(&card->creg_ctrl.lock);
256
251 if (cmd == NULL) { 257 if (cmd == NULL) {
252 dev_err(CARD_TO_DEV(card), 258 dev_err(CARD_TO_DEV(card),
253 "Spurious creg interrupt!\n"); 259 "Spurious creg interrupt!\n");
@@ -287,7 +293,7 @@ static void creg_cmd_done(struct work_struct *work)
287 goto creg_done; 293 goto creg_done;
288 } 294 }
289 295
290 copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream); 296 st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
291 } 297 }
292 298
293creg_done: 299creg_done:
@@ -296,10 +302,10 @@ creg_done:
296 302
297 kmem_cache_free(creg_cmd_pool, cmd); 303 kmem_cache_free(creg_cmd_pool, cmd);
298 304
299 spin_lock(&card->creg_ctrl.lock); 305 spin_lock_bh(&card->creg_ctrl.lock);
300 card->creg_ctrl.active = 0; 306 card->creg_ctrl.active = 0;
301 creg_kick_queue(card); 307 creg_kick_queue(card);
302 spin_unlock(&card->creg_ctrl.lock); 308 spin_unlock_bh(&card->creg_ctrl.lock);
303} 309}
304 310
305static void creg_reset(struct rsxx_cardinfo *card) 311static void creg_reset(struct rsxx_cardinfo *card)
@@ -324,7 +330,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
324 "Resetting creg interface for recovery\n"); 330 "Resetting creg interface for recovery\n");
325 331
326 /* Cancel outstanding commands */ 332 /* Cancel outstanding commands */
327 spin_lock(&card->creg_ctrl.lock); 333 spin_lock_bh(&card->creg_ctrl.lock);
328 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) { 334 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
329 list_del(&cmd->list); 335 list_del(&cmd->list);
330 card->creg_ctrl.q_depth--; 336 card->creg_ctrl.q_depth--;
@@ -345,7 +351,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
345 351
346 card->creg_ctrl.active = 0; 352 card->creg_ctrl.active = 0;
347 } 353 }
348 spin_unlock(&card->creg_ctrl.lock); 354 spin_unlock_bh(&card->creg_ctrl.lock);
349 355
350 card->creg_ctrl.reset = 0; 356 card->creg_ctrl.reset = 0;
351 spin_lock_irqsave(&card->irq_lock, flags); 357 spin_lock_irqsave(&card->irq_lock, flags);
@@ -399,12 +405,12 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card,
399 return st; 405 return st;
400 406
401 /* 407 /*
402 * This timeout is neccessary for unresponsive hardware. The additional 408 * This timeout is necessary for unresponsive hardware. The additional
403	 * 20 seconds to used to guarantee that each cregs requests has time to 409	 * 20 seconds to used to guarantee that each cregs requests has time to
404	 * complete. 410	 * complete.
405 */ 411 */
406 timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC * 412 timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
407 card->creg_ctrl.q_depth) + 20000); 413 card->creg_ctrl.q_depth + 20000);
408 414
409 /* 415 /*
410 * The creg interface is guaranteed to complete. It has a timeout 416 * The creg interface is guaranteed to complete. It has a timeout
@@ -690,6 +696,32 @@ int rsxx_reg_access(struct rsxx_cardinfo *card,
690 return 0; 696 return 0;
691} 697}
692 698
699void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
700{
701 struct creg_cmd *cmd = NULL;
702
703 cmd = card->creg_ctrl.active_cmd;
704 card->creg_ctrl.active_cmd = NULL;
705
706 if (cmd) {
707 del_timer_sync(&card->creg_ctrl.cmd_timer);
708
709 spin_lock_bh(&card->creg_ctrl.lock);
710 list_add(&cmd->list, &card->creg_ctrl.queue);
711 card->creg_ctrl.q_depth++;
712 card->creg_ctrl.active = 0;
713 spin_unlock_bh(&card->creg_ctrl.lock);
714 }
715}
716
717void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
718{
719 spin_lock_bh(&card->creg_ctrl.lock);
720 if (!list_empty(&card->creg_ctrl.queue))
721 creg_kick_queue(card);
722 spin_unlock_bh(&card->creg_ctrl.lock);
723}
724
693/*------------ Initialization & Setup --------------*/ 725/*------------ Initialization & Setup --------------*/
694int rsxx_creg_setup(struct rsxx_cardinfo *card) 726int rsxx_creg_setup(struct rsxx_cardinfo *card)
695{ 727{
@@ -712,7 +744,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
712 int cnt = 0; 744 int cnt = 0;
713 745
714 /* Cancel outstanding commands */ 746 /* Cancel outstanding commands */
715 spin_lock(&card->creg_ctrl.lock); 747 spin_lock_bh(&card->creg_ctrl.lock);
716 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) { 748 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
717 list_del(&cmd->list); 749 list_del(&cmd->list);
718 if (cmd->cb) 750 if (cmd->cb)
@@ -737,7 +769,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
737 "Canceled active creg command\n"); 769 "Canceled active creg command\n");
738 kmem_cache_free(creg_cmd_pool, cmd); 770 kmem_cache_free(creg_cmd_pool, cmd);
739 } 771 }
740 spin_unlock(&card->creg_ctrl.lock); 772 spin_unlock_bh(&card->creg_ctrl.lock);
741 773
742 cancel_work_sync(&card->creg_ctrl.done_work); 774 cancel_work_sync(&card->creg_ctrl.done_work);
743} 775}
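
The cregs.c hunks above convert the creg_ctrl.lock users to spin_lock_bh(), make the data-copy helpers bail with -EIO once eeh_state is set, and add rsxx_eeh_save_issued_creg(), which takes back the command currently on the hardware and requeues it at the head of the queue so it is reissued after recovery. A small sketch of that "reclaim the active command and requeue it first" step is below; a pthread mutex stands in for spin_lock_bh() and the queue is a trivial singly linked list, purely for illustration.

/* Sketch of the pattern used by rsxx_eeh_save_issued_creg(); illustrative. */
#include <pthread.h>
#include <stdio.h>

struct cmd { int id; struct cmd *next; };

struct creg_ctrl {
	pthread_mutex_t lock;
	struct cmd *queue_head;   /* pending commands, head is issued first */
	struct cmd *active;       /* command currently on the hardware */
	int q_depth;
};

static void save_active_cmd(struct creg_ctrl *ctrl)
{
	struct cmd *cmd = ctrl->active;

	ctrl->active = NULL;
	if (!cmd)
		return;

	pthread_mutex_lock(&ctrl->lock);
	cmd->next = ctrl->queue_head;   /* put it back at the head... */
	ctrl->queue_head = cmd;         /* ...so it is the first to be reissued */
	ctrl->q_depth++;
	pthread_mutex_unlock(&ctrl->lock);
}

int main(void)
{
	struct creg_ctrl ctrl = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct cmd active = { .id = 42 };

	ctrl.active = &active;
	save_active_cmd(&ctrl);
	printf("q_depth=%d head=%d\n", ctrl.q_depth, ctrl.queue_head->id);
	return 0;
}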
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 63176e67662f..0607513cfb41 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -28,7 +28,7 @@
28struct rsxx_dma { 28struct rsxx_dma {
29 struct list_head list; 29 struct list_head list;
30 u8 cmd; 30 u8 cmd;
31 unsigned int laddr; /* Logical address on the ramsan */ 31 unsigned int laddr; /* Logical address */
32 struct { 32 struct {
33 u32 off; 33 u32 off;
34 u32 cnt; 34 u32 cnt;
@@ -81,9 +81,6 @@ enum rsxx_hw_status {
81 HW_STATUS_FAULT = 0x08, 81 HW_STATUS_FAULT = 0x08,
82}; 82};
83 83
84#define STATUS_BUFFER_SIZE8 4096
85#define COMMAND_BUFFER_SIZE8 4096
86
87static struct kmem_cache *rsxx_dma_pool; 84static struct kmem_cache *rsxx_dma_pool;
88 85
89struct dma_tracker { 86struct dma_tracker {
@@ -122,7 +119,7 @@ static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
122 return tgt; 119 return tgt;
123} 120}
124 121
125static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card) 122void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
126{ 123{
127 /* Reset all DMA Command/Status Queues */ 124 /* Reset all DMA Command/Status Queues */
128 iowrite32(DMA_QUEUE_RESET, card->regmap + RESET); 125 iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
@@ -210,7 +207,8 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
210 u32 q_depth = 0; 207 u32 q_depth = 0;
211 u32 intr_coal; 208 u32 intr_coal;
212 209
213 if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE) 210 if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
211 unlikely(card->eeh_state))
214 return; 212 return;
215 213
216 for (i = 0; i < card->n_targets; i++) 214 for (i = 0; i < card->n_targets; i++)
@@ -223,31 +221,26 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
223} 221}
224 222
225/*----------------- RSXX DMA Handling -------------------*/ 223/*----------------- RSXX DMA Handling -------------------*/
226static void rsxx_complete_dma(struct rsxx_cardinfo *card, 224static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
227 struct rsxx_dma *dma, 225 struct rsxx_dma *dma,
228 unsigned int status) 226 unsigned int status)
229{ 227{
230 if (status & DMA_SW_ERR) 228 if (status & DMA_SW_ERR)
231 printk_ratelimited(KERN_ERR 229 ctrl->stats.dma_sw_err++;
232 "SW Error in DMA(cmd x%02x, laddr x%08x)\n",
233 dma->cmd, dma->laddr);
234 if (status & DMA_HW_FAULT) 230 if (status & DMA_HW_FAULT)
235 printk_ratelimited(KERN_ERR 231 ctrl->stats.dma_hw_fault++;
236 "HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
237 dma->cmd, dma->laddr);
238 if (status & DMA_CANCELLED) 232 if (status & DMA_CANCELLED)
239 printk_ratelimited(KERN_ERR 233 ctrl->stats.dma_cancelled++;
240 "DMA Cancelled(cmd x%02x, laddr x%08x)\n",
241 dma->cmd, dma->laddr);
242 234
243 if (dma->dma_addr) 235 if (dma->dma_addr)
244 pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma), 236 pci_unmap_page(ctrl->card->dev, dma->dma_addr,
237 get_dma_size(dma),
245 dma->cmd == HW_CMD_BLK_WRITE ? 238 dma->cmd == HW_CMD_BLK_WRITE ?
246 PCI_DMA_TODEVICE : 239 PCI_DMA_TODEVICE :
247 PCI_DMA_FROMDEVICE); 240 PCI_DMA_FROMDEVICE);
248 241
249 if (dma->cb) 242 if (dma->cb)
250 dma->cb(card, dma->cb_data, status ? 1 : 0); 243 dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
251 244
252 kmem_cache_free(rsxx_dma_pool, dma); 245 kmem_cache_free(rsxx_dma_pool, dma);
253} 246}
@@ -330,14 +323,15 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
330 if (requeue_cmd) 323 if (requeue_cmd)
331 rsxx_requeue_dma(ctrl, dma); 324 rsxx_requeue_dma(ctrl, dma);
332 else 325 else
333 rsxx_complete_dma(ctrl->card, dma, status); 326 rsxx_complete_dma(ctrl, dma, status);
334} 327}
335 328
336static void dma_engine_stalled(unsigned long data) 329static void dma_engine_stalled(unsigned long data)
337{ 330{
338 struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data; 331 struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
339 332
340 if (atomic_read(&ctrl->stats.hw_q_depth) == 0) 333 if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
334 unlikely(ctrl->card->eeh_state))
341 return; 335 return;
342 336
343 if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) { 337 if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
@@ -369,7 +363,8 @@ static void rsxx_issue_dmas(struct work_struct *work)
369 ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work); 363 ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
370 hw_cmd_buf = ctrl->cmd.buf; 364 hw_cmd_buf = ctrl->cmd.buf;
371 365
372 if (unlikely(ctrl->card->halt)) 366 if (unlikely(ctrl->card->halt) ||
367 unlikely(ctrl->card->eeh_state))
373 return; 368 return;
374 369
375 while (1) { 370 while (1) {
@@ -397,7 +392,7 @@ static void rsxx_issue_dmas(struct work_struct *work)
397 */ 392 */
398 if (unlikely(ctrl->card->dma_fault)) { 393 if (unlikely(ctrl->card->dma_fault)) {
399 push_tracker(ctrl->trackers, tag); 394 push_tracker(ctrl->trackers, tag);
400 rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED); 395 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
401 continue; 396 continue;
402 } 397 }
403 398
@@ -432,19 +427,15 @@ static void rsxx_issue_dmas(struct work_struct *work)
432 427
433 /* Let HW know we've queued commands. */ 428 /* Let HW know we've queued commands. */
434 if (cmds_pending) { 429 if (cmds_pending) {
435 /*
436 * We must guarantee that the CPU writes to 'ctrl->cmd.buf'
437 * (which is in PCI-consistent system-memory) from the loop
438 * above make it into the coherency domain before the
439 * following PIO "trigger" updating the cmd.idx. A WMB is
440 * sufficient. We need not explicitly CPU cache-flush since
441 * the memory is a PCI-consistent (ie; coherent) mapping.
442 */
443 wmb();
444
445 atomic_add(cmds_pending, &ctrl->stats.hw_q_depth); 430 atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
446 mod_timer(&ctrl->activity_timer, 431 mod_timer(&ctrl->activity_timer,
447 jiffies + DMA_ACTIVITY_TIMEOUT); 432 jiffies + DMA_ACTIVITY_TIMEOUT);
433
434 if (unlikely(ctrl->card->eeh_state)) {
435 del_timer_sync(&ctrl->activity_timer);
436 return;
437 }
438
448 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); 439 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
449 } 440 }
450} 441}
@@ -463,7 +454,8 @@ static void rsxx_dma_done(struct work_struct *work)
463 hw_st_buf = ctrl->status.buf; 454 hw_st_buf = ctrl->status.buf;
464 455
465 if (unlikely(ctrl->card->halt) || 456 if (unlikely(ctrl->card->halt) ||
466 unlikely(ctrl->card->dma_fault)) 457 unlikely(ctrl->card->dma_fault) ||
458 unlikely(ctrl->card->eeh_state))
467 return; 459 return;
468 460
469 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count); 461 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
@@ -508,7 +500,7 @@ static void rsxx_dma_done(struct work_struct *work)
508 if (status) 500 if (status)
509 rsxx_handle_dma_error(ctrl, dma, status); 501 rsxx_handle_dma_error(ctrl, dma, status);
510 else 502 else
511 rsxx_complete_dma(ctrl->card, dma, 0); 503 rsxx_complete_dma(ctrl, dma, 0);
512 504
513 push_tracker(ctrl->trackers, tag); 505 push_tracker(ctrl->trackers, tag);
514 506
@@ -727,20 +719,54 @@ bvec_err:
727 719
728 720
729/*----------------- DMA Engine Initialization & Setup -------------------*/ 721/*----------------- DMA Engine Initialization & Setup -------------------*/
722int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
723{
724 ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
725 &ctrl->status.dma_addr);
726 ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
727 &ctrl->cmd.dma_addr);
728 if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
729 return -ENOMEM;
730
731 memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
732 iowrite32(lower_32_bits(ctrl->status.dma_addr),
733 ctrl->regmap + SB_ADD_LO);
734 iowrite32(upper_32_bits(ctrl->status.dma_addr),
735 ctrl->regmap + SB_ADD_HI);
736
737 memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
738 iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
739 iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
740
741 ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
742 if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
743 dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
744 ctrl->status.idx);
745 return -EINVAL;
746 }
747 iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
748 iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
749
750 ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
751 if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
752 dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
753 ctrl->status.idx);
754 return -EINVAL;
755 }
756 iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
757 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
758
759 return 0;
760}
761
730static int rsxx_dma_ctrl_init(struct pci_dev *dev, 762static int rsxx_dma_ctrl_init(struct pci_dev *dev,
731 struct rsxx_dma_ctrl *ctrl) 763 struct rsxx_dma_ctrl *ctrl)
732{ 764{
733 int i; 765 int i;
766 int st;
734 767
735 memset(&ctrl->stats, 0, sizeof(ctrl->stats)); 768 memset(&ctrl->stats, 0, sizeof(ctrl->stats));
736 769
737 ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
738 &ctrl->status.dma_addr);
739 ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
740 &ctrl->cmd.dma_addr);
741 if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
742 return -ENOMEM;
743
744 ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8); 770 ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
745 if (!ctrl->trackers) 771 if (!ctrl->trackers)
746 return -ENOMEM; 772 return -ENOMEM;
@@ -770,35 +796,9 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
770 INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas); 796 INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
771 INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done); 797 INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
772 798
773 memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8); 799 st = rsxx_hw_buffers_init(dev, ctrl);
774 iowrite32(lower_32_bits(ctrl->status.dma_addr), 800 if (st)
775 ctrl->regmap + SB_ADD_LO); 801 return st;
776 iowrite32(upper_32_bits(ctrl->status.dma_addr),
777 ctrl->regmap + SB_ADD_HI);
778
779 memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
780 iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
781 iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
782
783 ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
784 if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
785 dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
786 ctrl->status.idx);
787 return -EINVAL;
788 }
789 iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
790 iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
791
792 ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
793 if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
794 dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
795 ctrl->status.idx);
796 return -EINVAL;
797 }
798 iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
799 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
800
801 wmb();
802 802
803 return 0; 803 return 0;
804} 804}
@@ -834,7 +834,7 @@ static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
834 return 0; 834 return 0;
835} 835}
836 836
837static int rsxx_dma_configure(struct rsxx_cardinfo *card) 837int rsxx_dma_configure(struct rsxx_cardinfo *card)
838{ 838{
839 u32 intr_coal; 839 u32 intr_coal;
840 840
@@ -980,6 +980,103 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
980 } 980 }
981} 981}
982 982
983int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
984{
985 int i;
986 int j;
987 int cnt;
988 struct rsxx_dma *dma;
989 struct list_head *issued_dmas;
990
991 issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,
992 GFP_KERNEL);
993 if (!issued_dmas)
994 return -ENOMEM;
995
996 for (i = 0; i < card->n_targets; i++) {
997 INIT_LIST_HEAD(&issued_dmas[i]);
998 cnt = 0;
999 for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
1000 dma = get_tracker_dma(card->ctrl[i].trackers, j);
1001 if (dma == NULL)
1002 continue;
1003
1004 if (dma->cmd == HW_CMD_BLK_WRITE)
1005 card->ctrl[i].stats.writes_issued--;
1006 else if (dma->cmd == HW_CMD_BLK_DISCARD)
1007 card->ctrl[i].stats.discards_issued--;
1008 else
1009 card->ctrl[i].stats.reads_issued--;
1010
1011 list_add_tail(&dma->list, &issued_dmas[i]);
1012 push_tracker(card->ctrl[i].trackers, j);
1013 cnt++;
1014 }
1015
1016 spin_lock(&card->ctrl[i].queue_lock);
1017 list_splice(&issued_dmas[i], &card->ctrl[i].queue);
1018
1019 atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
1020 card->ctrl[i].stats.sw_q_depth += cnt;
1021 card->ctrl[i].e_cnt = 0;
1022
1023 list_for_each_entry(dma, &card->ctrl[i].queue, list) {
1024 if (dma->dma_addr)
1025 pci_unmap_page(card->dev, dma->dma_addr,
1026 get_dma_size(dma),
1027 dma->cmd == HW_CMD_BLK_WRITE ?
1028 PCI_DMA_TODEVICE :
1029 PCI_DMA_FROMDEVICE);
1030 }
1031 spin_unlock(&card->ctrl[i].queue_lock);
1032 }
1033
1034 kfree(issued_dmas);
1035
1036 return 0;
1037}
1038
1039void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
1040{
1041 struct rsxx_dma *dma;
1042 struct rsxx_dma *tmp;
1043 int i;
1044
1045 for (i = 0; i < card->n_targets; i++) {
1046 spin_lock(&card->ctrl[i].queue_lock);
1047 list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
1048 list_del(&dma->list);
1049
1050 rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
1051 }
1052 spin_unlock(&card->ctrl[i].queue_lock);
1053 }
1054}
1055
1056int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
1057{
1058 struct rsxx_dma *dma;
1059 int i;
1060
1061 for (i = 0; i < card->n_targets; i++) {
1062 spin_lock(&card->ctrl[i].queue_lock);
1063 list_for_each_entry(dma, &card->ctrl[i].queue, list) {
1064 dma->dma_addr = pci_map_page(card->dev, dma->page,
1065 dma->pg_off, get_dma_size(dma),
1066 dma->cmd == HW_CMD_BLK_WRITE ?
1067 PCI_DMA_TODEVICE :
1068 PCI_DMA_FROMDEVICE);
1069 if (!dma->dma_addr) {
1070 spin_unlock(&card->ctrl[i].queue_lock);
1071 kmem_cache_free(rsxx_dma_pool, dma);
1072 return -ENOMEM;
1073 }
1074 }
1075 spin_unlock(&card->ctrl[i].queue_lock);
1076 }
1077
1078 return 0;
1079}
983 1080
984int rsxx_dma_init(void) 1081int rsxx_dma_init(void)
985{ 1082{
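
rsxx_eeh_save_issued_dmas() above walks each channel's tracker table, moves every in-flight DMA back onto the software queue, adjusts the hardware and software queue-depth counters, and unmaps the pages so rsxx_eeh_remap_dmas() can map them again after the slot reset. The following standalone sketch models just the "drain a fixed-size tracker table back into a queue" bookkeeping with plain arrays; it is not the driver's data structures.

/* Sketch of draining a tracker table back onto a software queue, per channel. */
#include <stdio.h>
#include <string.h>

#define MAX_OUTSTANDING 8

struct dma { int laddr; int in_use; };

struct channel {
	struct dma trackers[MAX_OUTSTANDING]; /* slots handed to hardware */
	struct dma queue[MAX_OUTSTANDING];    /* software queue to reissue */
	int sw_q_depth;
	int hw_q_depth;
};

static void save_issued_dmas(struct channel *ch)
{
	int moved = 0;

	for (int i = 0; i < MAX_OUTSTANDING; i++) {
		if (!ch->trackers[i].in_use)
			continue;
		ch->queue[ch->sw_q_depth++] = ch->trackers[i]; /* requeue */
		ch->trackers[i].in_use = 0;                    /* free the tag */
		moved++;
	}
	ch->hw_q_depth -= moved;   /* nothing is considered in flight anymore */
}

int main(void)
{
	struct channel ch;

	memset(&ch, 0, sizeof(ch));
	ch.trackers[1] = (struct dma){ .laddr = 0x100, .in_use = 1 };
	ch.trackers[5] = (struct dma){ .laddr = 0x500, .in_use = 1 };
	ch.hw_q_depth = 2;

	save_issued_dmas(&ch);
	printf("sw=%d hw=%d first=0x%x\n", ch.sw_q_depth, ch.hw_q_depth,
	       ch.queue[0].laddr);
	return 0;
}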
diff --git a/drivers/block/rsxx/rsxx.h b/drivers/block/rsxx/rsxx.h
index 2e50b65902b7..24ba3642bd89 100644
--- a/drivers/block/rsxx/rsxx.h
+++ b/drivers/block/rsxx/rsxx.h
@@ -27,15 +27,17 @@
27 27
28/*----------------- IOCTL Definitions -------------------*/ 28/*----------------- IOCTL Definitions -------------------*/
29 29
30#define RSXX_MAX_DATA 8
31
30struct rsxx_reg_access { 32struct rsxx_reg_access {
31 __u32 addr; 33 __u32 addr;
32 __u32 cnt; 34 __u32 cnt;
33 __u32 stat; 35 __u32 stat;
34 __u32 stream; 36 __u32 stream;
35 __u32 data[8]; 37 __u32 data[RSXX_MAX_DATA];
36}; 38};
37 39
38#define RSXX_MAX_REG_CNT (8 * (sizeof(__u32))) 40#define RSXX_MAX_REG_CNT (RSXX_MAX_DATA * (sizeof(__u32)))
39 41
40#define RSXX_IOC_MAGIC 'r' 42#define RSXX_IOC_MAGIC 'r'
41 43
diff --git a/drivers/block/rsxx/rsxx_cfg.h b/drivers/block/rsxx/rsxx_cfg.h
index c025fe5fdb70..f384c943846d 100644
--- a/drivers/block/rsxx/rsxx_cfg.h
+++ b/drivers/block/rsxx/rsxx_cfg.h
@@ -58,7 +58,7 @@ struct rsxx_card_cfg {
58}; 58};
59 59
60/* Vendor ID Values */ 60/* Vendor ID Values */
61#define RSXX_VENDOR_ID_TMS_IBM 0 61#define RSXX_VENDOR_ID_IBM 0
62#define RSXX_VENDOR_ID_DSI 1 62#define RSXX_VENDOR_ID_DSI 1
63#define RSXX_VENDOR_COUNT 2 63#define RSXX_VENDOR_COUNT 2
64 64
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index a1ac907d8f4c..382e8bf5c03b 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -45,16 +45,13 @@
45 45
46struct proc_cmd; 46struct proc_cmd;
47 47
48#define PCI_VENDOR_ID_TMS_IBM 0x15B6 48#define PCI_DEVICE_ID_FS70_FLASH 0x04A9
49#define PCI_DEVICE_ID_RS70_FLASH 0x0019 49#define PCI_DEVICE_ID_FS80_FLASH 0x04AA
50#define PCI_DEVICE_ID_RS70D_FLASH 0x001A
51#define PCI_DEVICE_ID_RS80_FLASH 0x001C
52#define PCI_DEVICE_ID_RS81_FLASH 0x001E
53 50
54#define RS70_PCI_REV_SUPPORTED 4 51#define RS70_PCI_REV_SUPPORTED 4
55 52
56#define DRIVER_NAME "rsxx" 53#define DRIVER_NAME "rsxx"
57#define DRIVER_VERSION "3.7" 54#define DRIVER_VERSION "4.0"
58 55
59/* Block size is 4096 */ 56/* Block size is 4096 */
60#define RSXX_HW_BLK_SHIFT 12 57#define RSXX_HW_BLK_SHIFT 12
@@ -67,6 +64,9 @@ struct proc_cmd;
67#define RSXX_MAX_OUTSTANDING_CMDS 255 64#define RSXX_MAX_OUTSTANDING_CMDS 255
68#define RSXX_CS_IDX_MASK 0xff 65#define RSXX_CS_IDX_MASK 0xff
69 66
67#define STATUS_BUFFER_SIZE8 4096
68#define COMMAND_BUFFER_SIZE8 4096
69
70#define RSXX_MAX_TARGETS 8 70#define RSXX_MAX_TARGETS 8
71 71
72struct dma_tracker_list; 72struct dma_tracker_list;
@@ -91,6 +91,9 @@ struct rsxx_dma_stats {
91 u32 discards_failed; 91 u32 discards_failed;
92 u32 done_rescheduled; 92 u32 done_rescheduled;
93 u32 issue_rescheduled; 93 u32 issue_rescheduled;
94 u32 dma_sw_err;
95 u32 dma_hw_fault;
96 u32 dma_cancelled;
94 u32 sw_q_depth; /* Number of DMAs on the SW queue. */ 97 u32 sw_q_depth; /* Number of DMAs on the SW queue. */
95 atomic_t hw_q_depth; /* Number of DMAs queued to HW. */ 98 atomic_t hw_q_depth; /* Number of DMAs queued to HW. */
96}; 99};
@@ -116,6 +119,7 @@ struct rsxx_dma_ctrl {
116struct rsxx_cardinfo { 119struct rsxx_cardinfo {
117 struct pci_dev *dev; 120 struct pci_dev *dev;
118 unsigned int halt; 121 unsigned int halt;
122 unsigned int eeh_state;
119 123
120 void __iomem *regmap; 124 void __iomem *regmap;
121 spinlock_t irq_lock; 125 spinlock_t irq_lock;
@@ -224,6 +228,7 @@ enum rsxx_pci_regmap {
224 PERF_RD512_HI = 0xac, 228 PERF_RD512_HI = 0xac,
225 PERF_WR512_LO = 0xb0, 229 PERF_WR512_LO = 0xb0,
226 PERF_WR512_HI = 0xb4, 230 PERF_WR512_HI = 0xb4,
231 PCI_RECONFIG = 0xb8,
227}; 232};
228 233
229enum rsxx_intr { 234enum rsxx_intr {
@@ -237,6 +242,8 @@ enum rsxx_intr {
237 CR_INTR_DMA5 = 0x00000080, 242 CR_INTR_DMA5 = 0x00000080,
238 CR_INTR_DMA6 = 0x00000100, 243 CR_INTR_DMA6 = 0x00000100,
239 CR_INTR_DMA7 = 0x00000200, 244 CR_INTR_DMA7 = 0x00000200,
245 CR_INTR_ALL_C = 0x0000003f,
246 CR_INTR_ALL_G = 0x000003ff,
240 CR_INTR_DMA_ALL = 0x000003f5, 247 CR_INTR_DMA_ALL = 0x000003f5,
241 CR_INTR_ALL = 0xffffffff, 248 CR_INTR_ALL = 0xffffffff,
242}; 249};
@@ -253,8 +260,14 @@ enum rsxx_pci_reset {
253 DMA_QUEUE_RESET = 0x00000001, 260 DMA_QUEUE_RESET = 0x00000001,
254}; 261};
255 262
263enum rsxx_hw_fifo_flush {
264 RSXX_FLUSH_BUSY = 0x00000002,
265 RSXX_FLUSH_TIMEOUT = 0x00000004,
266};
267
256enum rsxx_pci_revision { 268enum rsxx_pci_revision {
257 RSXX_DISCARD_SUPPORT = 2, 269 RSXX_DISCARD_SUPPORT = 2,
270 RSXX_EEH_SUPPORT = 3,
258}; 271};
259 272
260enum rsxx_creg_cmd { 273enum rsxx_creg_cmd {
@@ -360,11 +373,17 @@ int rsxx_dma_setup(struct rsxx_cardinfo *card);
360void rsxx_dma_destroy(struct rsxx_cardinfo *card); 373void rsxx_dma_destroy(struct rsxx_cardinfo *card);
361int rsxx_dma_init(void); 374int rsxx_dma_init(void);
362void rsxx_dma_cleanup(void); 375void rsxx_dma_cleanup(void);
376void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
377int rsxx_dma_configure(struct rsxx_cardinfo *card);
363int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, 378int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
364 struct bio *bio, 379 struct bio *bio,
365 atomic_t *n_dmas, 380 atomic_t *n_dmas,
366 rsxx_dma_cb cb, 381 rsxx_dma_cb cb,
367 void *cb_data); 382 void *cb_data);
383int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
384int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
385void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card);
386int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
368 387
369/***** cregs.c *****/ 388/***** cregs.c *****/
370int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr, 389int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,
@@ -389,10 +408,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card);
389void rsxx_creg_destroy(struct rsxx_cardinfo *card); 408void rsxx_creg_destroy(struct rsxx_cardinfo *card);
390int rsxx_creg_init(void); 409int rsxx_creg_init(void);
391void rsxx_creg_cleanup(void); 410void rsxx_creg_cleanup(void);
392
393int rsxx_reg_access(struct rsxx_cardinfo *card, 411int rsxx_reg_access(struct rsxx_cardinfo *card,
394 struct rsxx_reg_access __user *ucmd, 412 struct rsxx_reg_access __user *ucmd,
395 int read); 413 int read);
414void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card);
415void rsxx_kick_creg_queue(struct rsxx_cardinfo *card);
396 416
397 417
398 418
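
In the rsxx_priv.h hunk above, the new CR_INTR_ALL_C and CR_INTR_ALL_G masks let rsxx_slot_reset() re-enable either the "common" interrupt set or the full "global" set depending on how many DMA targets the card has. From the values visible here, ALL_G appears to be ALL_C plus the upper four DMA channel bits; the quick check below assumes CR_INTR_DMA4 is 0x40, following the visible progression, since that definition is not in this hunk.

/* Quick check of how the new interrupt masks nest; CR_INTR_DMA4 is assumed. */
#include <assert.h>
#include <stdio.h>

#define CR_INTR_DMA4  0x00000040u  /* assumed from the DMA5..DMA7 progression */
#define CR_INTR_DMA5  0x00000080u
#define CR_INTR_DMA6  0x00000100u
#define CR_INTR_DMA7  0x00000200u
#define CR_INTR_ALL_C 0x0000003fu
#define CR_INTR_ALL_G 0x000003ffu

int main(void)
{
	unsigned int upper = CR_INTR_DMA4 | CR_INTR_DMA5 |
			     CR_INTR_DMA6 | CR_INTR_DMA7;

	assert((CR_INTR_ALL_C | upper) == CR_INTR_ALL_G);
	printf("ALL_C=0x%03x + upper DMA bits 0x%03x -> ALL_G=0x%03x\n",
	       CR_INTR_ALL_C, upper, CR_INTR_ALL_G);
	return 0;
}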
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index de1f319f7bd7..dd5b2fed97e9 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -164,7 +164,7 @@ static void make_response(struct xen_blkif *blkif, u64 id,
164 164
165#define foreach_grant_safe(pos, n, rbtree, node) \ 165#define foreach_grant_safe(pos, n, rbtree, node) \
166 for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ 166 for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
167 (n) = rb_next(&(pos)->node); \ 167 (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
168 &(pos)->node != NULL; \ 168 &(pos)->node != NULL; \
169 (pos) = container_of(n, typeof(*(pos)), node), \ 169 (pos) = container_of(n, typeof(*(pos)), node), \
170 (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) 170 (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
@@ -381,8 +381,8 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
381 381
382static void print_stats(struct xen_blkif *blkif) 382static void print_stats(struct xen_blkif *blkif)
383{ 383{
384 pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d" 384 pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
385 " | ds %4d\n", 385 " | ds %4llu\n",
386 current->comm, blkif->st_oo_req, 386 current->comm, blkif->st_oo_req,
387 blkif->st_rd_req, blkif->st_wr_req, 387 blkif->st_rd_req, blkif->st_wr_req,
388 blkif->st_f_req, blkif->st_ds_req); 388 blkif->st_f_req, blkif->st_ds_req);
@@ -442,7 +442,7 @@ int xen_blkif_schedule(void *arg)
442} 442}
443 443
444struct seg_buf { 444struct seg_buf {
445 unsigned long buf; 445 unsigned int offset;
446 unsigned int nsec; 446 unsigned int nsec;
447}; 447};
448/* 448/*
@@ -621,30 +621,21 @@ static int xen_blkbk_map(struct blkif_request *req,
621 * If this is a new persistent grant 621 * If this is a new persistent grant
622 * save the handler 622 * save the handler
623 */ 623 */
624 persistent_gnts[i]->handle = map[j].handle; 624 persistent_gnts[i]->handle = map[j++].handle;
625 persistent_gnts[i]->dev_bus_addr =
626 map[j++].dev_bus_addr;
627 } 625 }
628 pending_handle(pending_req, i) = 626 pending_handle(pending_req, i) =
629 persistent_gnts[i]->handle; 627 persistent_gnts[i]->handle;
630 628
631 if (ret) 629 if (ret)
632 continue; 630 continue;
633
634 seg[i].buf = persistent_gnts[i]->dev_bus_addr |
635 (req->u.rw.seg[i].first_sect << 9);
636 } else { 631 } else {
637 pending_handle(pending_req, i) = map[j].handle; 632 pending_handle(pending_req, i) = map[j++].handle;
638 bitmap_set(pending_req->unmap_seg, i, 1); 633 bitmap_set(pending_req->unmap_seg, i, 1);
639 634
640 if (ret) { 635 if (ret)
641 j++;
642 continue; 636 continue;
643 }
644
645 seg[i].buf = map[j++].dev_bus_addr |
646 (req->u.rw.seg[i].first_sect << 9);
647 } 637 }
638 seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
648 } 639 }
649 return ret; 640 return ret;
650} 641}
@@ -679,6 +670,16 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
679 return err; 670 return err;
680} 671}
681 672
673static int dispatch_other_io(struct xen_blkif *blkif,
674 struct blkif_request *req,
675 struct pending_req *pending_req)
676{
677 free_req(pending_req);
678 make_response(blkif, req->u.other.id, req->operation,
679 BLKIF_RSP_EOPNOTSUPP);
680 return -EIO;
681}
682
682static void xen_blk_drain_io(struct xen_blkif *blkif) 683static void xen_blk_drain_io(struct xen_blkif *blkif)
683{ 684{
684 atomic_set(&blkif->drain, 1); 685 atomic_set(&blkif->drain, 1);
@@ -800,17 +801,30 @@ __do_block_io_op(struct xen_blkif *blkif)
800 801
801 /* Apply all sanity checks to /private copy/ of request. */ 802 /* Apply all sanity checks to /private copy/ of request. */
802 barrier(); 803 barrier();
803 if (unlikely(req.operation == BLKIF_OP_DISCARD)) { 804
805 switch (req.operation) {
806 case BLKIF_OP_READ:
807 case BLKIF_OP_WRITE:
808 case BLKIF_OP_WRITE_BARRIER:
809 case BLKIF_OP_FLUSH_DISKCACHE:
810 if (dispatch_rw_block_io(blkif, &req, pending_req))
811 goto done;
812 break;
813 case BLKIF_OP_DISCARD:
804 free_req(pending_req); 814 free_req(pending_req);
805 if (dispatch_discard_io(blkif, &req)) 815 if (dispatch_discard_io(blkif, &req))
806 break; 816 goto done;
807 } else if (dispatch_rw_block_io(blkif, &req, pending_req))
808 break; 817 break;
818 default:
819 if (dispatch_other_io(blkif, &req, pending_req))
820 goto done;
821 break;
822 }
809 823
810 /* Yield point for this unbounded loop. */ 824 /* Yield point for this unbounded loop. */
811 cond_resched(); 825 cond_resched();
812 } 826 }
813 827done:
814 return more_to_do; 828 return more_to_do;
815} 829}
816 830
@@ -904,7 +918,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
904 pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", 918 pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
905 operation == READ ? "read" : "write", 919 operation == READ ? "read" : "write",
906 preq.sector_number, 920 preq.sector_number,
907 preq.sector_number + preq.nr_sects, preq.dev); 921 preq.sector_number + preq.nr_sects,
922 blkif->vbd.pdevice);
908 goto fail_response; 923 goto fail_response;
909 } 924 }
910 925
@@ -947,7 +962,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
947 (bio_add_page(bio, 962 (bio_add_page(bio,
948 pages[i], 963 pages[i],
949 seg[i].nsec << 9, 964 seg[i].nsec << 9,
950 seg[i].buf & ~PAGE_MASK) == 0)) { 965 seg[i].offset) == 0)) {
951 966
952 bio = bio_alloc(GFP_KERNEL, nseg-i); 967 bio = bio_alloc(GFP_KERNEL, nseg-i);
953 if (unlikely(bio == NULL)) 968 if (unlikely(bio == NULL))
@@ -977,13 +992,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
977 bio->bi_end_io = end_block_io_op; 992 bio->bi_end_io = end_block_io_op;
978 } 993 }
979 994
980 /*
981 * We set it one so that the last submit_bio does not have to call
982 * atomic_inc.
983 */
984 atomic_set(&pending_req->pendcnt, nbio); 995 atomic_set(&pending_req->pendcnt, nbio);
985
986 /* Get a reference count for the disk queue and start sending I/O */
987 blk_start_plug(&plug); 996 blk_start_plug(&plug);
988 997
989 for (i = 0; i < nbio; i++) 998 for (i = 0; i < nbio; i++)
@@ -1011,6 +1020,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
1011 fail_put_bio: 1020 fail_put_bio:
1012 for (i = 0; i < nbio; i++) 1021 for (i = 0; i < nbio; i++)
1013 bio_put(biolist[i]); 1022 bio_put(biolist[i]);
1023 atomic_set(&pending_req->pendcnt, 1);
1014 __end_block_io_op(pending_req, -EINVAL); 1024 __end_block_io_op(pending_req, -EINVAL);
1015 msleep(1); /* back off a bit */ 1025 msleep(1); /* back off a bit */
1016 return -EIO; 1026 return -EIO;
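
The blkback.c change above turns the request dispatch in __do_block_io_op() into an explicit switch on req.operation, with unknown operations routed through dispatch_other_io(), which frees the pending request and answers the frontend with BLKIF_RSP_EOPNOTSUPP rather than treating the request as a read or write. A tiny sketch of that "reject unknown ops but still respond" dispatch shape follows; the names are illustrative, not the blkback API.

/* Sketch of a dispatcher that answers unknown operations instead of hanging. */
#include <stdio.h>

enum op { OP_READ, OP_WRITE, OP_FLUSH, OP_DISCARD, OP_BOGUS = 42 };
enum rsp { RSP_OKAY, RSP_EOPNOTSUPP };

static enum rsp dispatch(enum op op)
{
	switch (op) {
	case OP_READ:
	case OP_WRITE:
	case OP_FLUSH:
		return RSP_OKAY;         /* would queue real block I/O here */
	case OP_DISCARD:
		return RSP_OKAY;         /* separate discard path */
	default:
		return RSP_EOPNOTSUPP;   /* unknown op: fail, but still respond */
	}
}

int main(void)
{
	printf("read -> %d, bogus -> %d\n", dispatch(OP_READ), dispatch(OP_BOGUS));
	return 0;
}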
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 6072390c7f57..60103e2517ba 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -77,11 +77,18 @@ struct blkif_x86_32_request_discard {
77 uint64_t nr_sectors; 77 uint64_t nr_sectors;
78} __attribute__((__packed__)); 78} __attribute__((__packed__));
79 79
80struct blkif_x86_32_request_other {
81 uint8_t _pad1;
82 blkif_vdev_t _pad2;
83 uint64_t id; /* private guest value, echoed in resp */
84} __attribute__((__packed__));
85
80struct blkif_x86_32_request { 86struct blkif_x86_32_request {
81 uint8_t operation; /* BLKIF_OP_??? */ 87 uint8_t operation; /* BLKIF_OP_??? */
82 union { 88 union {
83 struct blkif_x86_32_request_rw rw; 89 struct blkif_x86_32_request_rw rw;
84 struct blkif_x86_32_request_discard discard; 90 struct blkif_x86_32_request_discard discard;
91 struct blkif_x86_32_request_other other;
85 } u; 92 } u;
86} __attribute__((__packed__)); 93} __attribute__((__packed__));
87 94
@@ -113,11 +120,19 @@ struct blkif_x86_64_request_discard {
113 uint64_t nr_sectors; 120 uint64_t nr_sectors;
114} __attribute__((__packed__)); 121} __attribute__((__packed__));
115 122
123struct blkif_x86_64_request_other {
124 uint8_t _pad1;
125 blkif_vdev_t _pad2;
126 uint32_t _pad3; /* offsetof(blkif_..,u.discard.id)==8 */
127 uint64_t id; /* private guest value, echoed in resp */
128} __attribute__((__packed__));
129
116struct blkif_x86_64_request { 130struct blkif_x86_64_request {
117 uint8_t operation; /* BLKIF_OP_??? */ 131 uint8_t operation; /* BLKIF_OP_??? */
118 union { 132 union {
119 struct blkif_x86_64_request_rw rw; 133 struct blkif_x86_64_request_rw rw;
120 struct blkif_x86_64_request_discard discard; 134 struct blkif_x86_64_request_discard discard;
135 struct blkif_x86_64_request_other other;
121 } u; 136 } u;
122} __attribute__((__packed__)); 137} __attribute__((__packed__));
123 138
@@ -172,7 +187,6 @@ struct persistent_gnt {
172 struct page *page; 187 struct page *page;
173 grant_ref_t gnt; 188 grant_ref_t gnt;
174 grant_handle_t handle; 189 grant_handle_t handle;
175 uint64_t dev_bus_addr;
176 struct rb_node node; 190 struct rb_node node;
177}; 191};
178 192
@@ -208,13 +222,13 @@ struct xen_blkif {
208 222
209 /* statistics */ 223 /* statistics */
210 unsigned long st_print; 224 unsigned long st_print;
211 int st_rd_req; 225 unsigned long long st_rd_req;
212 int st_wr_req; 226 unsigned long long st_wr_req;
213 int st_oo_req; 227 unsigned long long st_oo_req;
214 int st_f_req; 228 unsigned long long st_f_req;
215 int st_ds_req; 229 unsigned long long st_ds_req;
216 int st_rd_sect; 230 unsigned long long st_rd_sect;
217 int st_wr_sect; 231 unsigned long long st_wr_sect;
218 232
219 wait_queue_head_t waiting_to_free; 233 wait_queue_head_t waiting_to_free;
220}; 234};
@@ -278,6 +292,11 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
278 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; 292 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
279 break; 293 break;
280 default: 294 default:
295 /*
296 * Don't know how to translate this op. Only get the
297 * ID so failure can be reported to the frontend.
298 */
299 dst->u.other.id = src->u.other.id;
281 break; 300 break;
282 } 301 }
283} 302}
@@ -309,6 +328,11 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
309 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; 328 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
310 break; 329 break;
311 default: 330 default:
331 /*
332 * Don't know how to translate this op. Only get the
333 * ID so failure can be reported to the frontend.
334 */
335 dst->u.other.id = src->u.other.id;
312 break; 336 break;
313 } 337 }
314} 338}
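
The common.h hunk above widens the per-blkif statistics from int to unsigned long long, and the matching print and sysfs format strings elsewhere in this commit switch to %llu, so long-running backends no longer wrap their request counters. The toy program below illustrates the wrap with unsigned arithmetic (the old fields were signed int; unsigned is used here only to keep the demonstration well-defined).

/* Why 32-bit request counters eventually wrap on a busy, long-lived backend. */
#include <stdio.h>

int main(void)
{
	unsigned int old_ctr = 4294967295u;        /* 32-bit counter at its max */
	unsigned long long new_ctr = 4294967295ULL;

	old_ctr++;   /* wraps back to 0 */
	new_ctr++;   /* keeps counting */
	printf("32-bit: %u   unsigned long long: %llu\n", old_ctr, new_ctr);
	return 0;
}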
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 5e237f630c47..8bfd1bcf95ec 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -230,13 +230,13 @@ int __init xen_blkif_interface_init(void)
230 } \ 230 } \
231 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) 231 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
232 232
233VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req); 233VBD_SHOW(oo_req, "%llu\n", be->blkif->st_oo_req);
234VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req); 234VBD_SHOW(rd_req, "%llu\n", be->blkif->st_rd_req);
235VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req); 235VBD_SHOW(wr_req, "%llu\n", be->blkif->st_wr_req);
236VBD_SHOW(f_req, "%d\n", be->blkif->st_f_req); 236VBD_SHOW(f_req, "%llu\n", be->blkif->st_f_req);
237VBD_SHOW(ds_req, "%d\n", be->blkif->st_ds_req); 237VBD_SHOW(ds_req, "%llu\n", be->blkif->st_ds_req);
238VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect); 238VBD_SHOW(rd_sect, "%llu\n", be->blkif->st_rd_sect);
239VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect); 239VBD_SHOW(wr_sect, "%llu\n", be->blkif->st_wr_sect);
240 240
241static struct attribute *xen_vbdstat_attrs[] = { 241static struct attribute *xen_vbdstat_attrs[] = {
242 &dev_attr_oo_req.attr, 242 &dev_attr_oo_req.attr,
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c3dae2e0f290..a894f88762d8 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -44,7 +44,7 @@
44#include <linux/mutex.h> 44#include <linux/mutex.h>
45#include <linux/scatterlist.h> 45#include <linux/scatterlist.h>
46#include <linux/bitmap.h> 46#include <linux/bitmap.h>
47#include <linux/llist.h> 47#include <linux/list.h>
48 48
49#include <xen/xen.h> 49#include <xen/xen.h>
50#include <xen/xenbus.h> 50#include <xen/xenbus.h>
@@ -68,13 +68,12 @@ enum blkif_state {
68struct grant { 68struct grant {
69 grant_ref_t gref; 69 grant_ref_t gref;
70 unsigned long pfn; 70 unsigned long pfn;
71 struct llist_node node; 71 struct list_head node;
72}; 72};
73 73
74struct blk_shadow { 74struct blk_shadow {
75 struct blkif_request req; 75 struct blkif_request req;
76 struct request *request; 76 struct request *request;
77 unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
78 struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 77 struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
79}; 78};
80 79
@@ -105,7 +104,7 @@ struct blkfront_info
105 struct work_struct work; 104 struct work_struct work;
106 struct gnttab_free_callback callback; 105 struct gnttab_free_callback callback;
107 struct blk_shadow shadow[BLK_RING_SIZE]; 106 struct blk_shadow shadow[BLK_RING_SIZE];
108 struct llist_head persistent_gnts; 107 struct list_head persistent_gnts;
109 unsigned int persistent_gnts_c; 108 unsigned int persistent_gnts_c;
110 unsigned long shadow_free; 109 unsigned long shadow_free;
111 unsigned int feature_flush; 110 unsigned int feature_flush;
@@ -165,6 +164,69 @@ static int add_id_to_freelist(struct blkfront_info *info,
165 return 0; 164 return 0;
166} 165}
167 166
167static int fill_grant_buffer(struct blkfront_info *info, int num)
168{
169 struct page *granted_page;
170 struct grant *gnt_list_entry, *n;
171 int i = 0;
172
173 while(i < num) {
174 gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
175 if (!gnt_list_entry)
176 goto out_of_memory;
177
178 granted_page = alloc_page(GFP_NOIO);
179 if (!granted_page) {
180 kfree(gnt_list_entry);
181 goto out_of_memory;
182 }
183
184 gnt_list_entry->pfn = page_to_pfn(granted_page);
185 gnt_list_entry->gref = GRANT_INVALID_REF;
186 list_add(&gnt_list_entry->node, &info->persistent_gnts);
187 i++;
188 }
189
190 return 0;
191
192out_of_memory:
193 list_for_each_entry_safe(gnt_list_entry, n,
194 &info->persistent_gnts, node) {
195 list_del(&gnt_list_entry->node);
196 __free_page(pfn_to_page(gnt_list_entry->pfn));
197 kfree(gnt_list_entry);
198 i--;
199 }
200 BUG_ON(i != 0);
201 return -ENOMEM;
202}
203
204static struct grant *get_grant(grant_ref_t *gref_head,
205 struct blkfront_info *info)
206{
207 struct grant *gnt_list_entry;
208 unsigned long buffer_mfn;
209
210 BUG_ON(list_empty(&info->persistent_gnts));
211 gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant,
212 node);
213 list_del(&gnt_list_entry->node);
214
215 if (gnt_list_entry->gref != GRANT_INVALID_REF) {
216 info->persistent_gnts_c--;
217 return gnt_list_entry;
218 }
219
220 /* Assign a gref to this page */
221 gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
222 BUG_ON(gnt_list_entry->gref == -ENOSPC);
223 buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
224 gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
225 info->xbdev->otherend_id,
226 buffer_mfn, 0);
227 return gnt_list_entry;
228}
229
168static const char *op_name(int op) 230static const char *op_name(int op)
169{ 231{
170 static const char *const names[] = { 232 static const char *const names[] = {
@@ -293,7 +355,6 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
293static int blkif_queue_request(struct request *req) 355static int blkif_queue_request(struct request *req)
294{ 356{
295 struct blkfront_info *info = req->rq_disk->private_data; 357 struct blkfront_info *info = req->rq_disk->private_data;
296 unsigned long buffer_mfn;
297 struct blkif_request *ring_req; 358 struct blkif_request *ring_req;
298 unsigned long id; 359 unsigned long id;
299 unsigned int fsect, lsect; 360 unsigned int fsect, lsect;
@@ -306,7 +367,6 @@ static int blkif_queue_request(struct request *req)
306 */ 367 */
307 bool new_persistent_gnts; 368 bool new_persistent_gnts;
308 grant_ref_t gref_head; 369 grant_ref_t gref_head;
309 struct page *granted_page;
310 struct grant *gnt_list_entry = NULL; 370 struct grant *gnt_list_entry = NULL;
311 struct scatterlist *sg; 371 struct scatterlist *sg;
312 372
@@ -370,41 +430,8 @@ static int blkif_queue_request(struct request *req)
370 fsect = sg->offset >> 9; 430 fsect = sg->offset >> 9;
371 lsect = fsect + (sg->length >> 9) - 1; 431 lsect = fsect + (sg->length >> 9) - 1;
372 432
373 if (info->persistent_gnts_c) { 433 gnt_list_entry = get_grant(&gref_head, info);
374 BUG_ON(llist_empty(&info->persistent_gnts)); 434 ref = gnt_list_entry->gref;
375 gnt_list_entry = llist_entry(
376 llist_del_first(&info->persistent_gnts),
377 struct grant, node);
378
379 ref = gnt_list_entry->gref;
380 buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
381 info->persistent_gnts_c--;
382 } else {
383 ref = gnttab_claim_grant_reference(&gref_head);
384 BUG_ON(ref == -ENOSPC);
385
386 gnt_list_entry =
387 kmalloc(sizeof(struct grant),
388 GFP_ATOMIC);
389 if (!gnt_list_entry)
390 return -ENOMEM;
391
392 granted_page = alloc_page(GFP_ATOMIC);
393 if (!granted_page) {
394 kfree(gnt_list_entry);
395 return -ENOMEM;
396 }
397
398 gnt_list_entry->pfn =
399 page_to_pfn(granted_page);
400 gnt_list_entry->gref = ref;
401
402 buffer_mfn = pfn_to_mfn(page_to_pfn(
403 granted_page));
404 gnttab_grant_foreign_access_ref(ref,
405 info->xbdev->otherend_id,
406 buffer_mfn, 0);
407 }
408 435
409 info->shadow[id].grants_used[i] = gnt_list_entry; 436 info->shadow[id].grants_used[i] = gnt_list_entry;
410 437
@@ -435,7 +462,6 @@ static int blkif_queue_request(struct request *req)
435 kunmap_atomic(shared_data); 462 kunmap_atomic(shared_data);
436 } 463 }
437 464
438 info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
439 ring_req->u.rw.seg[i] = 465 ring_req->u.rw.seg[i] =
440 (struct blkif_request_segment) { 466 (struct blkif_request_segment) {
441 .gref = ref, 467 .gref = ref,
@@ -790,9 +816,8 @@ static void blkif_restart_queue(struct work_struct *work)
790 816
791static void blkif_free(struct blkfront_info *info, int suspend) 817static void blkif_free(struct blkfront_info *info, int suspend)
792{ 818{
793 struct llist_node *all_gnts; 819 struct grant *persistent_gnt;
794 struct grant *persistent_gnt, *tmp; 820 struct grant *n;
795 struct llist_node *n;
796 821
797 /* Prevent new requests being issued until we fix things up. */ 822 /* Prevent new requests being issued until we fix things up. */
798 spin_lock_irq(&info->io_lock); 823 spin_lock_irq(&info->io_lock);
@@ -803,22 +828,20 @@ static void blkif_free(struct blkfront_info *info, int suspend)
803 blk_stop_queue(info->rq); 828 blk_stop_queue(info->rq);
804 829
805 /* Remove all persistent grants */ 830 /* Remove all persistent grants */
806 if (info->persistent_gnts_c) { 831 if (!list_empty(&info->persistent_gnts)) {
807 all_gnts = llist_del_all(&info->persistent_gnts); 832 list_for_each_entry_safe(persistent_gnt, n,
808 persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node); 833 &info->persistent_gnts, node) {
809 while (persistent_gnt) { 834 list_del(&persistent_gnt->node);
810 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); 835 if (persistent_gnt->gref != GRANT_INVALID_REF) {
836 gnttab_end_foreign_access(persistent_gnt->gref,
837 0, 0UL);
838 info->persistent_gnts_c--;
839 }
811 __free_page(pfn_to_page(persistent_gnt->pfn)); 840 __free_page(pfn_to_page(persistent_gnt->pfn));
812 tmp = persistent_gnt; 841 kfree(persistent_gnt);
813 n = persistent_gnt->node.next;
814 if (n)
815 persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node);
816 else
817 persistent_gnt = NULL;
818 kfree(tmp);
819 } 842 }
820 info->persistent_gnts_c = 0;
821 } 843 }
844 BUG_ON(info->persistent_gnts_c != 0);
822 845
823 /* No more gnttab callback work. */ 846 /* No more gnttab callback work. */
824 gnttab_cancel_free_callback(&info->callback); 847 gnttab_cancel_free_callback(&info->callback);
@@ -875,7 +898,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
875 } 898 }
876 /* Add the persistent grant into the list of free grants */ 899 /* Add the persistent grant into the list of free grants */
877 for (i = 0; i < s->req.u.rw.nr_segments; i++) { 900 for (i = 0; i < s->req.u.rw.nr_segments; i++) {
878 llist_add(&s->grants_used[i]->node, &info->persistent_gnts); 901 list_add(&s->grants_used[i]->node, &info->persistent_gnts);
879 info->persistent_gnts_c++; 902 info->persistent_gnts_c++;
880 } 903 }
881} 904}
@@ -1013,6 +1036,12 @@ static int setup_blkring(struct xenbus_device *dev,
1013 1036
1014 sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); 1037 sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
1015 1038
1039 /* Allocate memory for grants */
1040 err = fill_grant_buffer(info, BLK_RING_SIZE *
1041 BLKIF_MAX_SEGMENTS_PER_REQUEST);
1042 if (err)
1043 goto fail;
1044
1016 err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); 1045 err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
1017 if (err < 0) { 1046 if (err < 0) {
1018 free_page((unsigned long)sring); 1047 free_page((unsigned long)sring);
@@ -1171,7 +1200,7 @@ static int blkfront_probe(struct xenbus_device *dev,
1171 spin_lock_init(&info->io_lock); 1200 spin_lock_init(&info->io_lock);
1172 info->xbdev = dev; 1201 info->xbdev = dev;
1173 info->vdevice = vdevice; 1202 info->vdevice = vdevice;
1174 init_llist_head(&info->persistent_gnts); 1203 INIT_LIST_HEAD(&info->persistent_gnts);
1175 info->persistent_gnts_c = 0; 1204 info->persistent_gnts_c = 0;
1176 info->connected = BLKIF_STATE_DISCONNECTED; 1205 info->connected = BLKIF_STATE_DISCONNECTED;
1177 INIT_WORK(&info->work, blkif_restart_queue); 1206 INIT_WORK(&info->work, blkif_restart_queue);
@@ -1203,11 +1232,10 @@ static int blkif_recover(struct blkfront_info *info)
1203 int j; 1232 int j;
1204 1233
1205 /* Stage 1: Make a safe copy of the shadow state. */ 1234 /* Stage 1: Make a safe copy of the shadow state. */
1206 copy = kmalloc(sizeof(info->shadow), 1235 copy = kmemdup(info->shadow, sizeof(info->shadow),
1207 GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); 1236 GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
1208 if (!copy) 1237 if (!copy)
1209 return -ENOMEM; 1238 return -ENOMEM;
1210 memcpy(copy, info->shadow, sizeof(info->shadow));
1211 1239
1212 /* Stage 2: Set up free list. */ 1240 /* Stage 2: Set up free list. */
1213 memset(&info->shadow, 0, sizeof(info->shadow)); 1241 memset(&info->shadow, 0, sizeof(info->shadow));
@@ -1236,7 +1264,7 @@ static int blkif_recover(struct blkfront_info *info)
1236 gnttab_grant_foreign_access_ref( 1264 gnttab_grant_foreign_access_ref(
1237 req->u.rw.seg[j].gref, 1265 req->u.rw.seg[j].gref,
1238 info->xbdev->otherend_id, 1266 info->xbdev->otherend_id,
1239 pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]), 1267 pfn_to_mfn(copy[i].grants_used[j]->pfn),
1240 0); 1268 0);
1241 } 1269 }
1242 info->shadow[req->u.rw.id].req = *req; 1270 info->shadow[req->u.rw.id].req = *req;
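The blkif_free() hunk above replaces the lock-less llist walk with an ordinary list iterated by list_for_each_entry_safe(), the idiom for freeing entries while walking a list. A minimal userspace sketch of the same safe-removal pattern, with a hand-rolled singly linked list standing in for struct list_head (all names below are illustrative, not driver symbols):

#include <stdio.h>
#include <stdlib.h>

struct grant {
        int gref;
        struct grant *next;
};

int main(void)
{
        struct grant *head = NULL, *g, *n;
        int i;

        /* Build a small list of fake grants. */
        for (i = 0; i < 4; i++) {
                g = malloc(sizeof(*g));
                g->gref = i;
                g->next = head;
                head = g;
        }

        /*
         * Safe-removal walk: save the next pointer before freeing the
         * current node, which is what list_for_each_entry_safe() does
         * for struct list_head in the kernel.
         */
        for (g = head; g != NULL; g = n) {
                n = g->next;
                printf("releasing grant %d\n", g->gref);
                free(g);
        }
        head = NULL;

        return 0;
}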
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index b282af181b44..6aab00ef4379 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -73,9 +73,11 @@ static struct usb_device_id ath3k_table[] = {
73 { USB_DEVICE(0x03F0, 0x311D) }, 73 { USB_DEVICE(0x03F0, 0x311D) },
74 74
75 /* Atheros AR3012 with sflash firmware*/ 75 /* Atheros AR3012 with sflash firmware*/
76 { USB_DEVICE(0x0CF3, 0x0036) },
76 { USB_DEVICE(0x0CF3, 0x3004) }, 77 { USB_DEVICE(0x0CF3, 0x3004) },
77 { USB_DEVICE(0x0CF3, 0x3008) }, 78 { USB_DEVICE(0x0CF3, 0x3008) },
78 { USB_DEVICE(0x0CF3, 0x311D) }, 79 { USB_DEVICE(0x0CF3, 0x311D) },
80 { USB_DEVICE(0x0CF3, 0x817a) },
79 { USB_DEVICE(0x13d3, 0x3375) }, 81 { USB_DEVICE(0x13d3, 0x3375) },
80 { USB_DEVICE(0x04CA, 0x3004) }, 82 { USB_DEVICE(0x04CA, 0x3004) },
81 { USB_DEVICE(0x04CA, 0x3005) }, 83 { USB_DEVICE(0x04CA, 0x3005) },
@@ -107,9 +109,11 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
107static struct usb_device_id ath3k_blist_tbl[] = { 109static struct usb_device_id ath3k_blist_tbl[] = {
108 110
109 /* Atheros AR3012 with sflash firmware*/ 111 /* Atheros AR3012 with sflash firmware*/
112 { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
110 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, 113 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
111 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, 114 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
112 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, 115 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
116 { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
113 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 117 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
114 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, 118 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
115 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 119 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index e547851870e7..2cc5f774a29c 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -131,9 +131,11 @@ static struct usb_device_id blacklist_table[] = {
131 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, 131 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
132 132
133 /* Atheros 3012 with sflash firmware */ 133 /* Atheros 3012 with sflash firmware */
134 { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
134 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, 135 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
135 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, 136 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
136 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, 137 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
138 { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
137 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 139 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
138 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, 140 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
139 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 141 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 69ae5972713c..a0f7724852eb 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -380,6 +380,15 @@ void hwrng_unregister(struct hwrng *rng)
380} 380}
381EXPORT_SYMBOL_GPL(hwrng_unregister); 381EXPORT_SYMBOL_GPL(hwrng_unregister);
382 382
383static void __exit hwrng_exit(void)
384{
385 mutex_lock(&rng_mutex);
386 BUG_ON(current_rng);
387 kfree(rng_buffer);
388 mutex_unlock(&rng_mutex);
389}
390
391module_exit(hwrng_exit);
383 392
384MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); 393MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
385MODULE_LICENSE("GPL"); 394MODULE_LICENSE("GPL");
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index e905d5f53051..ce5f3fc25d6d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -149,7 +149,8 @@ struct ports_device {
149 spinlock_t ports_lock; 149 spinlock_t ports_lock;
150 150
151 /* To protect the vq operations for the control channel */ 151 /* To protect the vq operations for the control channel */
152 spinlock_t cvq_lock; 152 spinlock_t c_ivq_lock;
153 spinlock_t c_ovq_lock;
153 154
154 /* The current config space is stored here */ 155 /* The current config space is stored here */
155 struct virtio_console_config config; 156 struct virtio_console_config config;
@@ -569,11 +570,14 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
569 vq = portdev->c_ovq; 570 vq = portdev->c_ovq;
570 571
571 sg_init_one(sg, &cpkt, sizeof(cpkt)); 572 sg_init_one(sg, &cpkt, sizeof(cpkt));
573
574 spin_lock(&portdev->c_ovq_lock);
572 if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) { 575 if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
573 virtqueue_kick(vq); 576 virtqueue_kick(vq);
574 while (!virtqueue_get_buf(vq, &len)) 577 while (!virtqueue_get_buf(vq, &len))
575 cpu_relax(); 578 cpu_relax();
576 } 579 }
580 spin_unlock(&portdev->c_ovq_lock);
577 return 0; 581 return 0;
578} 582}
579 583
@@ -1436,7 +1440,7 @@ static int add_port(struct ports_device *portdev, u32 id)
1436 * rproc_serial does not want the console port, only 1440 * rproc_serial does not want the console port, only
1437 * the generic port implementation. 1441 * the generic port implementation.
1438 */ 1442 */
1439 port->host_connected = port->guest_connected = true; 1443 port->host_connected = true;
1440 else if (!use_multiport(port->portdev)) { 1444 else if (!use_multiport(port->portdev)) {
1441 /* 1445 /*
1442 * If we're not using multiport support, 1446 * If we're not using multiport support,
@@ -1709,23 +1713,23 @@ static void control_work_handler(struct work_struct *work)
1709 portdev = container_of(work, struct ports_device, control_work); 1713 portdev = container_of(work, struct ports_device, control_work);
1710 vq = portdev->c_ivq; 1714 vq = portdev->c_ivq;
1711 1715
1712 spin_lock(&portdev->cvq_lock); 1716 spin_lock(&portdev->c_ivq_lock);
1713 while ((buf = virtqueue_get_buf(vq, &len))) { 1717 while ((buf = virtqueue_get_buf(vq, &len))) {
1714 spin_unlock(&portdev->cvq_lock); 1718 spin_unlock(&portdev->c_ivq_lock);
1715 1719
1716 buf->len = len; 1720 buf->len = len;
1717 buf->offset = 0; 1721 buf->offset = 0;
1718 1722
1719 handle_control_message(portdev, buf); 1723 handle_control_message(portdev, buf);
1720 1724
1721 spin_lock(&portdev->cvq_lock); 1725 spin_lock(&portdev->c_ivq_lock);
1722 if (add_inbuf(portdev->c_ivq, buf) < 0) { 1726 if (add_inbuf(portdev->c_ivq, buf) < 0) {
1723 dev_warn(&portdev->vdev->dev, 1727 dev_warn(&portdev->vdev->dev,
1724 "Error adding buffer to queue\n"); 1728 "Error adding buffer to queue\n");
1725 free_buf(buf, false); 1729 free_buf(buf, false);
1726 } 1730 }
1727 } 1731 }
1728 spin_unlock(&portdev->cvq_lock); 1732 spin_unlock(&portdev->c_ivq_lock);
1729} 1733}
1730 1734
1731static void out_intr(struct virtqueue *vq) 1735static void out_intr(struct virtqueue *vq)
@@ -1752,13 +1756,23 @@ static void in_intr(struct virtqueue *vq)
1752 port->inbuf = get_inbuf(port); 1756 port->inbuf = get_inbuf(port);
1753 1757
1754 /* 1758 /*
1755 * Don't queue up data when port is closed. This condition 1759 * Normally the port should not accept data when the port is
1760 * closed. For generic serial ports, the host won't (shouldn't)
1761 * send data till the guest is connected. But this condition
1756 * can be reached when a console port is not yet connected (no 1762 * can be reached when a console port is not yet connected (no
1757 * tty is spawned) and the host sends out data to console 1763 * tty is spawned) and the other side sends out data over the
1758 * ports. For generic serial ports, the host won't 1764 * vring, or when a remote device starts sending data before
1759 * (shouldn't) send data till the guest is connected. 1765 * the ports are opened.
1766 *
1767 * A generic serial port will discard data if not connected,
1768 * while console ports and rproc-serial ports accepts data at
1769 * any time. rproc-serial is initiated with guest_connected to
1770 * false because port_fops_open expects this. Console ports are
1771 * hooked up with an HVC console and is initialized with
1772 * guest_connected to true.
1760 */ 1773 */
1761 if (!port->guest_connected) 1774
1775 if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
1762 discard_port_data(port); 1776 discard_port_data(port);
1763 1777
1764 spin_unlock_irqrestore(&port->inbuf_lock, flags); 1778 spin_unlock_irqrestore(&port->inbuf_lock, flags);
@@ -1986,10 +2000,12 @@ static int virtcons_probe(struct virtio_device *vdev)
1986 if (multiport) { 2000 if (multiport) {
1987 unsigned int nr_added_bufs; 2001 unsigned int nr_added_bufs;
1988 2002
1989 spin_lock_init(&portdev->cvq_lock); 2003 spin_lock_init(&portdev->c_ivq_lock);
2004 spin_lock_init(&portdev->c_ovq_lock);
1990 INIT_WORK(&portdev->control_work, &control_work_handler); 2005 INIT_WORK(&portdev->control_work, &control_work_handler);
1991 2006
1992 nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); 2007 nr_added_bufs = fill_queue(portdev->c_ivq,
2008 &portdev->c_ivq_lock);
1993 if (!nr_added_bufs) { 2009 if (!nr_added_bufs) {
1994 dev_err(&vdev->dev, 2010 dev_err(&vdev->dev,
1995 "Error allocating buffers for control queue\n"); 2011 "Error allocating buffers for control queue\n");
@@ -2140,7 +2156,7 @@ static int virtcons_restore(struct virtio_device *vdev)
2140 return ret; 2156 return ret;
2141 2157
2142 if (use_multiport(portdev)) 2158 if (use_multiport(portdev))
2143 fill_queue(portdev->c_ivq, &portdev->cvq_lock); 2159 fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
2144 2160
2145 list_for_each_entry(port, &portdev->ports, list) { 2161 list_for_each_entry(port, &portdev->ports, list) {
2146 port->in_vq = portdev->in_vqs[port->id]; 2162 port->in_vq = portdev->in_vqs[port->id];
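The virtio_console changes split the single cvq_lock into c_ivq_lock and c_ovq_lock, so the control input and output virtqueues no longer serialize on one lock, and __send_control_msg() now holds c_ovq_lock across the add/kick/get sequence. A loose userspace analogy of that one-lock-per-queue layout, using pthreads and two counters in place of the virtqueues (build with -pthread; purely illustrative, not driver code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t in_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t out_lock = PTHREAD_MUTEX_INITIALIZER;
static long in_count, out_count;

/* Control-output path: only ever takes out_lock. */
static void *ovq_worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < 100000; i++) {
                pthread_mutex_lock(&out_lock);
                out_count++;
                pthread_mutex_unlock(&out_lock);
        }
        return NULL;
}

/* Control-input path: only ever takes in_lock, so it never blocks ovq_worker. */
static void *ivq_worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < 100000; i++) {
                pthread_mutex_lock(&in_lock);
                in_count++;
                pthread_mutex_unlock(&in_lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, ovq_worker, NULL);
        pthread_create(&b, NULL, ivq_worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("in=%ld out=%ld\n", in_count, out_count);
        return 0;
}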
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 1e2de7305362..f873dcefe0de 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -703,7 +703,7 @@ static void tegra20_pll_init(void)
703 clks[pll_a_out0] = clk; 703 clks[pll_a_out0] = clk;
704 704
705 /* PLLE */ 705 /* PLLE */
706 clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL, 706 clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, pmc_base,
707 0, 100000000, &pll_e_params, 707 0, 100000000, &pll_e_params,
708 0, pll_e_freq_table, NULL); 708 0, pll_e_freq_table, NULL);
709 clk_register_clkdev(clk, "pll_e", NULL); 709 clk_register_clkdev(clk, "pll_e", NULL);
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 937bc286591f..57a8774f0b4e 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -730,7 +730,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
730 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { 730 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
731 cpumask_copy(policy->cpus, perf->shared_cpu_map); 731 cpumask_copy(policy->cpus, perf->shared_cpu_map);
732 } 732 }
733 cpumask_copy(policy->related_cpus, perf->shared_cpu_map);
734 733
735#ifdef CONFIG_SMP 734#ifdef CONFIG_SMP
736 dmi_check_system(sw_any_bug_dmi_table); 735 dmi_check_system(sw_any_bug_dmi_table);
@@ -742,7 +741,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
742 if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { 741 if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
743 cpumask_clear(policy->cpus); 742 cpumask_clear(policy->cpus);
744 cpumask_set_cpu(cpu, policy->cpus); 743 cpumask_set_cpu(cpu, policy->cpus);
745 cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
746 policy->shared_type = CPUFREQ_SHARED_TYPE_HW; 744 policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
747 pr_info_once(PFX "overriding BIOS provided _PSD data\n"); 745 pr_info_once(PFX "overriding BIOS provided _PSD data\n");
748 } 746 }
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 4e5b7fb8927c..37d23a0f8c56 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -178,10 +178,16 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
178 178
179static int cpu0_cpufreq_probe(struct platform_device *pdev) 179static int cpu0_cpufreq_probe(struct platform_device *pdev)
180{ 180{
181 struct device_node *np; 181 struct device_node *np, *parent;
182 int ret; 182 int ret;
183 183
184 for_each_child_of_node(of_find_node_by_path("/cpus"), np) { 184 parent = of_find_node_by_path("/cpus");
185 if (!parent) {
186 pr_err("failed to find OF /cpus\n");
187 return -ENOENT;
188 }
189
190 for_each_child_of_node(parent, np) {
185 if (of_get_property(np, "operating-points", NULL)) 191 if (of_get_property(np, "operating-points", NULL))
186 break; 192 break;
187 } 193 }
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 46bde01eee62..cc4bd2f6838a 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -14,8 +14,8 @@
14 * published by the Free Software Foundation. 14 * published by the Free Software Foundation.
15 */ 15 */
16 16
17#ifndef _CPUFREQ_GOVERNER_H 17#ifndef _CPUFREQ_GOVERNOR_H
18#define _CPUFREQ_GOVERNER_H 18#define _CPUFREQ_GOVERNOR_H
19 19
20#include <linux/cpufreq.h> 20#include <linux/cpufreq.h>
21#include <linux/kobject.h> 21#include <linux/kobject.h>
@@ -175,4 +175,4 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs,
175 unsigned int sampling_rate); 175 unsigned int sampling_rate);
176int cpufreq_governor_dbs(struct dbs_data *dbs_data, 176int cpufreq_governor_dbs(struct dbs_data *dbs_data,
177 struct cpufreq_policy *policy, unsigned int event); 177 struct cpufreq_policy *policy, unsigned int event);
178#endif /* _CPUFREQ_GOVERNER_H */ 178#endif /* _CPUFREQ_GOVERNOR_H */
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 2fd779eb1ed1..bfd6273fd873 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -180,15 +180,19 @@ static void cpufreq_stats_free_sysfs(unsigned int cpu)
180{ 180{
181 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 181 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
182 182
183 if (!cpufreq_frequency_get_table(cpu)) 183 if (!policy)
184 return; 184 return;
185 185
186 if (policy && !policy_is_shared(policy)) { 186 if (!cpufreq_frequency_get_table(cpu))
187 goto put_ref;
188
189 if (!policy_is_shared(policy)) {
187 pr_debug("%s: Free sysfs stat\n", __func__); 190 pr_debug("%s: Free sysfs stat\n", __func__);
188 sysfs_remove_group(&policy->kobj, &stats_attr_group); 191 sysfs_remove_group(&policy->kobj, &stats_attr_group);
189 } 192 }
190 if (policy) 193
191 cpufreq_cpu_put(policy); 194put_ref:
195 cpufreq_cpu_put(policy);
192} 196}
193 197
194static int cpufreq_stats_create_table(struct cpufreq_policy *policy, 198static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f6dd1e761129..6133ef5cf671 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -358,14 +358,14 @@ static void intel_pstate_sysfs_expose_params(void)
358static int intel_pstate_min_pstate(void) 358static int intel_pstate_min_pstate(void)
359{ 359{
360 u64 value; 360 u64 value;
361 rdmsrl(0xCE, value); 361 rdmsrl(MSR_PLATFORM_INFO, value);
362 return (value >> 40) & 0xFF; 362 return (value >> 40) & 0xFF;
363} 363}
364 364
365static int intel_pstate_max_pstate(void) 365static int intel_pstate_max_pstate(void)
366{ 366{
367 u64 value; 367 u64 value;
368 rdmsrl(0xCE, value); 368 rdmsrl(MSR_PLATFORM_INFO, value);
369 return (value >> 8) & 0xFF; 369 return (value >> 8) & 0xFF;
370} 370}
371 371
@@ -373,7 +373,7 @@ static int intel_pstate_turbo_pstate(void)
373{ 373{
374 u64 value; 374 u64 value;
375 int nont, ret; 375 int nont, ret;
376 rdmsrl(0x1AD, value); 376 rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
377 nont = intel_pstate_max_pstate(); 377 nont = intel_pstate_max_pstate();
378 ret = ((value) & 255); 378 ret = ((value) & 255);
379 if (ret <= nont) 379 if (ret <= nont)
@@ -454,7 +454,7 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
454 sample->idletime_us * 100, 454 sample->idletime_us * 100,
455 sample->duration_us); 455 sample->duration_us);
456 core_pct = div64_u64(sample->aperf * 100, sample->mperf); 456 core_pct = div64_u64(sample->aperf * 100, sample->mperf);
457 sample->freq = cpu->pstate.turbo_pstate * core_pct * 1000; 457 sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
458 458
459 sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct), 459 sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
460 100); 460 100);
@@ -502,7 +502,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
502 502
503 sample_time = cpu->pstate_policy->sample_rate_ms; 503 sample_time = cpu->pstate_policy->sample_rate_ms;
504 delay = msecs_to_jiffies(sample_time); 504 delay = msecs_to_jiffies(sample_time);
505 delay -= jiffies % delay;
506 mod_timer_pinned(&cpu->timer, jiffies + delay); 505 mod_timer_pinned(&cpu->timer, jiffies + delay);
507} 506}
508 507
@@ -752,6 +751,29 @@ static struct cpufreq_driver intel_pstate_driver = {
752 751
753static int __initdata no_load; 752static int __initdata no_load;
754 753
754static int intel_pstate_msrs_not_valid(void)
755{
756 /* Check that all the msr's we are using are valid. */
757 u64 aperf, mperf, tmp;
758
759 rdmsrl(MSR_IA32_APERF, aperf);
760 rdmsrl(MSR_IA32_MPERF, mperf);
761
762 if (!intel_pstate_min_pstate() ||
763 !intel_pstate_max_pstate() ||
764 !intel_pstate_turbo_pstate())
765 return -ENODEV;
766
767 rdmsrl(MSR_IA32_APERF, tmp);
768 if (!(tmp - aperf))
769 return -ENODEV;
770
771 rdmsrl(MSR_IA32_MPERF, tmp);
772 if (!(tmp - mperf))
773 return -ENODEV;
774
775 return 0;
776}
755static int __init intel_pstate_init(void) 777static int __init intel_pstate_init(void)
756{ 778{
757 int cpu, rc = 0; 779 int cpu, rc = 0;
@@ -764,6 +786,9 @@ static int __init intel_pstate_init(void)
764 if (!id) 786 if (!id)
765 return -ENODEV; 787 return -ENODEV;
766 788
789 if (intel_pstate_msrs_not_valid())
790 return -ENODEV;
791
767 pr_info("Intel P-state driver initializing.\n"); 792 pr_info("Intel P-state driver initializing.\n");
768 793
769 all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus()); 794 all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
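intel_pstate_msrs_not_valid() reads IA32_APERF and IA32_MPERF twice and refuses to load if either counter is not ticking, on top of checking the pstate limits from MSR_PLATFORM_INFO and MSR_NHM_TURBO_RATIO_LIMIT. The APERF/MPERF half of that sanity check can be sketched from userspace through the msr character device (this assumes the msr module is loaded, root privileges, and CPU 0; 0xE7/0xE8 are the architectural MSR numbers):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_MPERF 0xE7
#define MSR_IA32_APERF 0xE8

/* Read one 64-bit MSR of CPU 0 via the msr.ko character device. */
static int rdmsr(int fd, uint32_t reg, uint64_t *val)
{
        return pread(fd, val, sizeof(*val), reg) == sizeof(*val) ? 0 : -1;
}

int main(void)
{
        uint64_t aperf1, mperf1, aperf2, mperf2;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/cpu/0/msr (needs root and the msr module)");
                return 1;
        }

        if (rdmsr(fd, MSR_IA32_APERF, &aperf1) ||
            rdmsr(fd, MSR_IA32_MPERF, &mperf1) ||
            rdmsr(fd, MSR_IA32_APERF, &aperf2) ||
            rdmsr(fd, MSR_IA32_MPERF, &mperf2)) {
                perror("pread");
                close(fd);
                return 1;
        }

        /* Same test as the driver: both counters must be advancing. */
        if (aperf2 == aperf1 || mperf2 == mperf1)
                printf("APERF/MPERF not advancing; the driver would bail out with -ENODEV\n");
        else
                printf("APERF delta %llu, MPERF delta %llu\n",
                       (unsigned long long)(aperf2 - aperf1),
                       (unsigned long long)(mperf2 - mperf1));

        close(fd);
        return 0;
}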
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b2a0a0726a54..cf268b14ae9a 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1650,11 +1650,7 @@ struct caam_alg_template {
1650}; 1650};
1651 1651
1652static struct caam_alg_template driver_algs[] = { 1652static struct caam_alg_template driver_algs[] = {
1653 /* 1653 /* single-pass ipsec_esp descriptor */
1654 * single-pass ipsec_esp descriptor
1655 * authencesn(*,*) is also registered, although not present
1656 * explicitly here.
1657 */
1658 { 1654 {
1659 .name = "authenc(hmac(md5),cbc(aes))", 1655 .name = "authenc(hmac(md5),cbc(aes))",
1660 .driver_name = "authenc-hmac-md5-cbc-aes-caam", 1656 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
@@ -2217,9 +2213,7 @@ static int __init caam_algapi_init(void)
2217 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2213 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2218 /* TODO: check if h/w supports alg */ 2214 /* TODO: check if h/w supports alg */
2219 struct caam_crypto_alg *t_alg; 2215 struct caam_crypto_alg *t_alg;
2220 bool done = false;
2221 2216
2222authencesn:
2223 t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); 2217 t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
2224 if (IS_ERR(t_alg)) { 2218 if (IS_ERR(t_alg)) {
2225 err = PTR_ERR(t_alg); 2219 err = PTR_ERR(t_alg);
@@ -2233,25 +2227,8 @@ authencesn:
2233 dev_warn(ctrldev, "%s alg registration failed\n", 2227 dev_warn(ctrldev, "%s alg registration failed\n",
2234 t_alg->crypto_alg.cra_driver_name); 2228 t_alg->crypto_alg.cra_driver_name);
2235 kfree(t_alg); 2229 kfree(t_alg);
2236 } else { 2230 } else
2237 list_add_tail(&t_alg->entry, &priv->alg_list); 2231 list_add_tail(&t_alg->entry, &priv->alg_list);
2238 if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD &&
2239 !memcmp(driver_algs[i].name, "authenc", 7) &&
2240 !done) {
2241 char *name;
2242
2243 name = driver_algs[i].name;
2244 memmove(name + 10, name + 7, strlen(name) - 7);
2245 memcpy(name + 7, "esn", 3);
2246
2247 name = driver_algs[i].driver_name;
2248 memmove(name + 10, name + 7, strlen(name) - 7);
2249 memcpy(name + 7, "esn", 3);
2250
2251 done = true;
2252 goto authencesn;
2253 }
2254 }
2255 } 2232 }
2256 if (!list_empty(&priv->alg_list)) 2233 if (!list_empty(&priv->alg_list))
2257 dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", 2234 dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index cf15e7813801..762aeff626ac 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -23,7 +23,6 @@
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/debugfs.h> 24#include <linux/debugfs.h>
25#include <linux/circ_buf.h> 25#include <linux/circ_buf.h>
26#include <linux/string.h>
27#include <net/xfrm.h> 26#include <net/xfrm.h>
28 27
29#include <crypto/algapi.h> 28#include <crypto/algapi.h>
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 09b184adf31b..5b2b5e61e4f9 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -38,7 +38,6 @@
38#include <linux/spinlock.h> 38#include <linux/spinlock.h>
39#include <linux/rtnetlink.h> 39#include <linux/rtnetlink.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/string.h>
42 41
43#include <crypto/algapi.h> 42#include <crypto/algapi.h>
44#include <crypto/aes.h> 43#include <crypto/aes.h>
@@ -1974,11 +1973,7 @@ struct talitos_alg_template {
1974}; 1973};
1975 1974
1976static struct talitos_alg_template driver_algs[] = { 1975static struct talitos_alg_template driver_algs[] = {
1977 /* 1976 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
1978 * AEAD algorithms. These use a single-pass ipsec_esp descriptor.
1979 * authencesn(*,*) is also registered, although not present
1980 * explicitly here.
1981 */
1982 { .type = CRYPTO_ALG_TYPE_AEAD, 1977 { .type = CRYPTO_ALG_TYPE_AEAD,
1983 .alg.crypto = { 1978 .alg.crypto = {
1984 .cra_name = "authenc(hmac(sha1),cbc(aes))", 1979 .cra_name = "authenc(hmac(sha1),cbc(aes))",
@@ -2820,9 +2815,7 @@ static int talitos_probe(struct platform_device *ofdev)
2820 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { 2815 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
2821 struct talitos_crypto_alg *t_alg; 2816 struct talitos_crypto_alg *t_alg;
2822 char *name = NULL; 2817 char *name = NULL;
2823 bool authenc = false;
2824 2818
2825authencesn:
2826 t_alg = talitos_alg_alloc(dev, &driver_algs[i]); 2819 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
2827 if (IS_ERR(t_alg)) { 2820 if (IS_ERR(t_alg)) {
2828 err = PTR_ERR(t_alg); 2821 err = PTR_ERR(t_alg);
@@ -2837,8 +2830,6 @@ authencesn:
2837 err = crypto_register_alg( 2830 err = crypto_register_alg(
2838 &t_alg->algt.alg.crypto); 2831 &t_alg->algt.alg.crypto);
2839 name = t_alg->algt.alg.crypto.cra_driver_name; 2832 name = t_alg->algt.alg.crypto.cra_driver_name;
2840 authenc = authenc ? !authenc :
2841 !(bool)memcmp(name, "authenc", 7);
2842 break; 2833 break;
2843 case CRYPTO_ALG_TYPE_AHASH: 2834 case CRYPTO_ALG_TYPE_AHASH:
2844 err = crypto_register_ahash( 2835 err = crypto_register_ahash(
@@ -2851,25 +2842,8 @@ authencesn:
2851 dev_err(dev, "%s alg registration failed\n", 2842 dev_err(dev, "%s alg registration failed\n",
2852 name); 2843 name);
2853 kfree(t_alg); 2844 kfree(t_alg);
2854 } else { 2845 } else
2855 list_add_tail(&t_alg->entry, &priv->alg_list); 2846 list_add_tail(&t_alg->entry, &priv->alg_list);
2856 if (authenc) {
2857 struct crypto_alg *alg =
2858 &driver_algs[i].alg.crypto;
2859
2860 name = alg->cra_name;
2861 memmove(name + 10, name + 7,
2862 strlen(name) - 7);
2863 memcpy(name + 7, "esn", 3);
2864
2865 name = alg->cra_driver_name;
2866 memmove(name + 10, name + 7,
2867 strlen(name) - 7);
2868 memcpy(name + 7, "esn", 3);
2869
2870 goto authencesn;
2871 }
2872 }
2873 } 2847 }
2874 } 2848 }
2875 if (!list_empty(&priv->alg_list)) 2849 if (!list_empty(&priv->alg_list))
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 8bc5fef07e7a..22c9063e0120 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = {
1750 .shutdown = ux500_cryp_shutdown, 1750 .shutdown = ux500_cryp_shutdown,
1751 .driver = { 1751 .driver = {
1752 .owner = THIS_MODULE, 1752 .owner = THIS_MODULE,
1753 .name = "cryp1" 1753 .name = "cryp1",
1754 .pm = &ux500_cryp_pm, 1754 .pm = &ux500_cryp_pm,
1755 } 1755 }
1756}; 1756};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 80b69971cf28..aeaea32bcfda 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -83,6 +83,7 @@ config INTEL_IOP_ADMA
83 83
84config DW_DMAC 84config DW_DMAC
85 tristate "Synopsys DesignWare AHB DMA support" 85 tristate "Synopsys DesignWare AHB DMA support"
86 depends on GENERIC_HARDIRQS
86 select DMA_ENGINE 87 select DMA_ENGINE
87 default y if CPU_AT32AP7000 88 default y if CPU_AT32AP7000
88 help 89 help
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index c599558faeda..43a5329d4483 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1001,6 +1001,13 @@ static inline void convert_burst(u32 *maxburst)
1001 *maxburst = 0; 1001 *maxburst = 0;
1002} 1002}
1003 1003
1004static inline void convert_slave_id(struct dw_dma_chan *dwc)
1005{
1006 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1007
1008 dwc->dma_sconfig.slave_id -= dw->request_line_base;
1009}
1010
1004static int 1011static int
1005set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) 1012set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
1006{ 1013{
@@ -1015,6 +1022,7 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
1015 1022
1016 convert_burst(&dwc->dma_sconfig.src_maxburst); 1023 convert_burst(&dwc->dma_sconfig.src_maxburst);
1017 convert_burst(&dwc->dma_sconfig.dst_maxburst); 1024 convert_burst(&dwc->dma_sconfig.dst_maxburst);
1025 convert_slave_id(dwc);
1018 1026
1019 return 0; 1027 return 0;
1020} 1028}
@@ -1276,9 +1284,9 @@ static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
1276 if (dma_spec->args_count != 3) 1284 if (dma_spec->args_count != 3)
1277 return NULL; 1285 return NULL;
1278 1286
1279 fargs.req = be32_to_cpup(dma_spec->args+0); 1287 fargs.req = dma_spec->args[0];
1280 fargs.src = be32_to_cpup(dma_spec->args+1); 1288 fargs.src = dma_spec->args[1];
1281 fargs.dst = be32_to_cpup(dma_spec->args+2); 1289 fargs.dst = dma_spec->args[2];
1282 1290
1283 if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS || 1291 if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
1284 fargs.src >= dw->nr_masters || 1292 fargs.src >= dw->nr_masters ||
@@ -1628,6 +1636,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
1628 1636
1629static int dw_probe(struct platform_device *pdev) 1637static int dw_probe(struct platform_device *pdev)
1630{ 1638{
1639 const struct platform_device_id *match;
1631 struct dw_dma_platform_data *pdata; 1640 struct dw_dma_platform_data *pdata;
1632 struct resource *io; 1641 struct resource *io;
1633 struct dw_dma *dw; 1642 struct dw_dma *dw;
@@ -1711,6 +1720,11 @@ static int dw_probe(struct platform_device *pdev)
1711 memcpy(dw->data_width, pdata->data_width, 4); 1720 memcpy(dw->data_width, pdata->data_width, 4);
1712 } 1721 }
1713 1722
1723 /* Get the base request line if set */
1724 match = platform_get_device_id(pdev);
1725 if (match)
1726 dw->request_line_base = (unsigned int)match->driver_data;
1727
1714 /* Calculate all channel mask before DMA setup */ 1728 /* Calculate all channel mask before DMA setup */
1715 dw->all_chan_mask = (1 << nr_channels) - 1; 1729 dw->all_chan_mask = (1 << nr_channels) - 1;
1716 1730
@@ -1906,7 +1920,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table);
1906#endif 1920#endif
1907 1921
1908static const struct platform_device_id dw_dma_ids[] = { 1922static const struct platform_device_id dw_dma_ids[] = {
1909 { "INTL9C60", 0 }, 1923 /* Name, Request Line Base */
1924 { "INTL9C60", (kernel_ulong_t)16 },
1910 { } 1925 { }
1911}; 1926};
1912 1927
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index cf0ce5c77d60..4d02c3669b75 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -247,6 +247,7 @@ struct dw_dma {
247 /* hardware configuration */ 247 /* hardware configuration */
248 unsigned char nr_masters; 248 unsigned char nr_masters;
249 unsigned char data_width[4]; 249 unsigned char data_width[4];
250 unsigned int request_line_base;
250 251
251 struct dw_dma_chan chan[0]; 252 struct dw_dma_chan chan[0];
252}; 253};
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index c4b4fd2acc42..08b43bf37158 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -276,12 +276,20 @@ static void omap_dma_issue_pending(struct dma_chan *chan)
276 276
277 spin_lock_irqsave(&c->vc.lock, flags); 277 spin_lock_irqsave(&c->vc.lock, flags);
278 if (vchan_issue_pending(&c->vc) && !c->desc) { 278 if (vchan_issue_pending(&c->vc) && !c->desc) {
279 struct omap_dmadev *d = to_omap_dma_dev(chan->device); 279 /*
280 spin_lock(&d->lock); 280 * c->cyclic is used only by audio and in this case the DMA needs
281 if (list_empty(&c->node)) 281 * to be started without delay.
282 list_add_tail(&c->node, &d->pending); 282 */
283 spin_unlock(&d->lock); 283 if (!c->cyclic) {
284 tasklet_schedule(&d->task); 284 struct omap_dmadev *d = to_omap_dma_dev(chan->device);
285 spin_lock(&d->lock);
286 if (list_empty(&c->node))
287 list_add_tail(&c->node, &d->pending);
288 spin_unlock(&d->lock);
289 tasklet_schedule(&d->task);
290 } else {
291 omap_dma_start_desc(c);
292 }
285 } 293 }
286 spin_unlock_irqrestore(&c->vc.lock, flags); 294 spin_unlock_irqrestore(&c->vc.lock, flags);
287} 295}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 718153122759..5dbc5946c4c3 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2882,7 +2882,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2882{ 2882{
2883 struct dma_pl330_platdata *pdat; 2883 struct dma_pl330_platdata *pdat;
2884 struct dma_pl330_dmac *pdmac; 2884 struct dma_pl330_dmac *pdmac;
2885 struct dma_pl330_chan *pch; 2885 struct dma_pl330_chan *pch, *_p;
2886 struct pl330_info *pi; 2886 struct pl330_info *pi;
2887 struct dma_device *pd; 2887 struct dma_device *pd;
2888 struct resource *res; 2888 struct resource *res;
@@ -2984,7 +2984,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2984 ret = dma_async_device_register(pd); 2984 ret = dma_async_device_register(pd);
2985 if (ret) { 2985 if (ret) {
2986 dev_err(&adev->dev, "unable to register DMAC\n"); 2986 dev_err(&adev->dev, "unable to register DMAC\n");
2987 goto probe_err2; 2987 goto probe_err3;
2988 }
2989
2990 if (adev->dev.of_node) {
2991 ret = of_dma_controller_register(adev->dev.of_node,
2992 of_dma_pl330_xlate, pdmac);
2993 if (ret) {
2994 dev_err(&adev->dev,
2995 "unable to register DMA to the generic DT DMA helpers\n");
2996 }
2988 } 2997 }
2989 2998
2990 dev_info(&adev->dev, 2999 dev_info(&adev->dev,
@@ -2995,16 +3004,21 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2995 pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, 3004 pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
2996 pi->pcfg.num_peri, pi->pcfg.num_events); 3005 pi->pcfg.num_peri, pi->pcfg.num_events);
2997 3006
2998 ret = of_dma_controller_register(adev->dev.of_node,
2999 of_dma_pl330_xlate, pdmac);
3000 if (ret) {
3001 dev_err(&adev->dev,
3002 "unable to register DMA to the generic DT DMA helpers\n");
3003 goto probe_err2;
3004 }
3005
3006 return 0; 3007 return 0;
3008probe_err3:
3009 amba_set_drvdata(adev, NULL);
3007 3010
3011 /* Idle the DMAC */
3012 list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
3013 chan.device_node) {
3014
3015 /* Remove the channel */
3016 list_del(&pch->chan.device_node);
3017
3018 /* Flush the channel */
3019 pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
3020 pl330_free_chan_resources(&pch->chan);
3021 }
3008probe_err2: 3022probe_err2:
3009 pl330_del(pi); 3023 pl330_del(pi);
3010probe_err1: 3024probe_err1:
@@ -3023,8 +3037,10 @@ static int pl330_remove(struct amba_device *adev)
3023 if (!pdmac) 3037 if (!pdmac)
3024 return 0; 3038 return 0;
3025 3039
3026 of_dma_controller_free(adev->dev.of_node); 3040 if (adev->dev.of_node)
3041 of_dma_controller_free(adev->dev.of_node);
3027 3042
3043 dma_async_device_unregister(&pdmac->ddma);
3028 amba_set_drvdata(adev, NULL); 3044 amba_set_drvdata(adev, NULL);
3029 3045
3030 /* Idle the DMAC */ 3046 /* Idle the DMAC */
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c
index cdae207028a7..6c3fca97d346 100644
--- a/drivers/eisa/pci_eisa.c
+++ b/drivers/eisa/pci_eisa.c
@@ -19,10 +19,10 @@
19/* There is only *one* pci_eisa device per machine, right ? */ 19/* There is only *one* pci_eisa device per machine, right ? */
20static struct eisa_root_device pci_eisa_root; 20static struct eisa_root_device pci_eisa_root;
21 21
22static int __init pci_eisa_init(struct pci_dev *pdev, 22static int __init pci_eisa_init(struct pci_dev *pdev)
23 const struct pci_device_id *ent)
24{ 23{
25 int rc; 24 int rc, i;
25 struct resource *res, *bus_res = NULL;
26 26
27 if ((rc = pci_enable_device (pdev))) { 27 if ((rc = pci_enable_device (pdev))) {
28 printk (KERN_ERR "pci_eisa : Could not enable device %s\n", 28 printk (KERN_ERR "pci_eisa : Could not enable device %s\n",
@@ -30,9 +30,30 @@ static int __init pci_eisa_init(struct pci_dev *pdev,
30 return rc; 30 return rc;
31 } 31 }
32 32
33 /*
34 * The Intel 82375 PCI-EISA bridge is a subtractive-decode PCI
35 * device, so the resources available on EISA are the same as those
36 * available on the 82375 bus. This works the same as a PCI-PCI
37 * bridge in subtractive-decode mode (see pci_read_bridge_bases()).
38 * We assume other PCI-EISA bridges are similar.
39 *
40 * eisa_root_register() can only deal with a single io port resource,
41 * so we use the first valid io port resource.
42 */
43 pci_bus_for_each_resource(pdev->bus, res, i)
44 if (res && (res->flags & IORESOURCE_IO)) {
45 bus_res = res;
46 break;
47 }
48
49 if (!bus_res) {
50 dev_err(&pdev->dev, "No resources available\n");
51 return -1;
52 }
53
33 pci_eisa_root.dev = &pdev->dev; 54 pci_eisa_root.dev = &pdev->dev;
34 pci_eisa_root.res = pdev->bus->resource[0]; 55 pci_eisa_root.res = bus_res;
35 pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start; 56 pci_eisa_root.bus_base_addr = bus_res->start;
36 pci_eisa_root.slots = EISA_MAX_SLOTS; 57 pci_eisa_root.slots = EISA_MAX_SLOTS;
37 pci_eisa_root.dma_mask = pdev->dma_mask; 58 pci_eisa_root.dma_mask = pdev->dma_mask;
38 dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root); 59 dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root);
@@ -45,22 +66,26 @@ static int __init pci_eisa_init(struct pci_dev *pdev,
45 return 0; 66 return 0;
46} 67}
47 68
48static struct pci_device_id pci_eisa_pci_tbl[] = { 69/*
49 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 70 * We have to call pci_eisa_init_early() before pnpacpi_init()/isapnp_init().
50 PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 }, 71 * Otherwise pnp resource will get enabled early and could prevent eisa
51 { 0, } 72 * from being initialized.
52}; 73 * Also need to make sure pci_eisa_init_early() is called after
74 * x86/pci_subsys_init().
75 * So need to use subsys_initcall_sync with it.
76 */
77static int __init pci_eisa_init_early(void)
78{
79 struct pci_dev *dev = NULL;
80 int ret;
53 81
54static struct pci_driver __refdata pci_eisa_driver = { 82 for_each_pci_dev(dev)
55 .name = "pci_eisa", 83 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) {
56 .id_table = pci_eisa_pci_tbl, 84 ret = pci_eisa_init(dev);
57 .probe = pci_eisa_init, 85 if (ret)
58}; 86 return ret;
87 }
59 88
60static int __init pci_eisa_init_module (void) 89 return 0;
61{
62 return pci_register_driver (&pci_eisa_driver);
63} 90}
64 91subsys_initcall_sync(pci_eisa_init_early);
65device_initcall(pci_eisa_init_module);
66MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl);
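The reworked pci_eisa_init() walks the bridge's bus resources and takes the first window flagged IORESOURCE_IO as the EISA root resource, instead of blindly using resource[0]. The selection logic on its own, sketched over a plain array rather than pci_bus_for_each_resource() (the struct is a trimmed-down stand-in):

#include <stdio.h>

#define IORESOURCE_IO  0x0100   /* same bit the kernel uses for port windows */
#define IORESOURCE_MEM 0x0200

struct resource {
        unsigned long start, end, flags;
};

int main(void)
{
        struct resource bus_resources[] = {
                { 0xe0000000, 0xefffffff, IORESOURCE_MEM },   /* memory window */
                { 0x1000, 0x1fff, IORESOURCE_IO },            /* first I/O window: wins */
                { 0x2000, 0x2fff, IORESOURCE_IO },            /* later I/O window: ignored */
        };
        struct resource *res, *bus_res = NULL;
        unsigned int i;

        /* Equivalent of the pci_bus_for_each_resource() loop above. */
        for (i = 0; i < sizeof(bus_resources) / sizeof(bus_resources[0]); i++) {
                res = &bus_resources[i];
                if (res->flags & IORESOURCE_IO) {
                        bus_res = res;
                        break;
                }
        }

        if (!bus_res)
                printf("No I/O resource available\n");
        else
                printf("EISA root at ports 0x%lx-0x%lx\n", bus_res->start, bus_res->end);
        return 0;
}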
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index b70e3815c459..8f3c947b0029 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -32,6 +32,38 @@
32#define DEV_NAME "max77693-muic" 32#define DEV_NAME "max77693-muic"
33#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */ 33#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
34 34
35/*
36 * Default values of the MAX77693 registers used to bring up the MUIC device.
37 * If the user doesn't set initial values for the MUIC device through platform
38 * data, the extcon-max77693 driver uses 'default_init_data' to bring up basic
39 * operation of the MAX77693 MUIC device.
40 */
41struct max77693_reg_data default_init_data[] = {
42 {
43 /* STATUS2 - [3]ChgDetRun */
44 .addr = MAX77693_MUIC_REG_STATUS2,
45 .data = STATUS2_CHGDETRUN_MASK,
46 }, {
47 /* INTMASK1 - Unmask [3]ADC1KM,[0]ADCM */
48 .addr = MAX77693_MUIC_REG_INTMASK1,
49 .data = INTMASK1_ADC1K_MASK
50 | INTMASK1_ADC_MASK,
51 }, {
52 /* INTMASK2 - Unmask [0]ChgTypM */
53 .addr = MAX77693_MUIC_REG_INTMASK2,
54 .data = INTMASK2_CHGTYP_MASK,
55 }, {
56 /* INTMASK3 - Mask all of interrupts */
57 .addr = MAX77693_MUIC_REG_INTMASK3,
58 .data = 0x0,
59 }, {
60 /* CDETCTRL2 */
61 .addr = MAX77693_MUIC_REG_CDETCTRL2,
62 .data = CDETCTRL2_VIDRMEN_MASK
63 | CDETCTRL2_DXOVPEN_MASK,
64 },
65};
66
35enum max77693_muic_adc_debounce_time { 67enum max77693_muic_adc_debounce_time {
36 ADC_DEBOUNCE_TIME_5MS = 0, 68 ADC_DEBOUNCE_TIME_5MS = 0,
37 ADC_DEBOUNCE_TIME_10MS, 69 ADC_DEBOUNCE_TIME_10MS,
@@ -1045,8 +1077,9 @@ static int max77693_muic_probe(struct platform_device *pdev)
1045{ 1077{
1046 struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent); 1078 struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent);
1047 struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev); 1079 struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev);
1048 struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
1049 struct max77693_muic_info *info; 1080 struct max77693_muic_info *info;
1081 struct max77693_reg_data *init_data;
1082 int num_init_data;
1050 int delay_jiffies; 1083 int delay_jiffies;
1051 int ret; 1084 int ret;
1052 int i; 1085 int i;
@@ -1145,15 +1178,25 @@ static int max77693_muic_probe(struct platform_device *pdev)
1145 goto err_irq; 1178 goto err_irq;
1146 } 1179 }
1147 1180
1148 /* Initialize MUIC register by using platform data */ 1181
1149 for (i = 0 ; i < muic_pdata->num_init_data ; i++) { 1182 /* Initialize MUIC register by using platform data or default data */
1150 enum max77693_irq_source irq_src = MAX77693_IRQ_GROUP_NR; 1183 if (pdata->muic_data) {
1184 init_data = pdata->muic_data->init_data;
1185 num_init_data = pdata->muic_data->num_init_data;
1186 } else {
1187 init_data = default_init_data;
1188 num_init_data = ARRAY_SIZE(default_init_data);
1189 }
1190
1191 for (i = 0 ; i < num_init_data ; i++) {
1192 enum max77693_irq_source irq_src
1193 = MAX77693_IRQ_GROUP_NR;
1151 1194
1152 max77693_write_reg(info->max77693->regmap_muic, 1195 max77693_write_reg(info->max77693->regmap_muic,
1153 muic_pdata->init_data[i].addr, 1196 init_data[i].addr,
1154 muic_pdata->init_data[i].data); 1197 init_data[i].data);
1155 1198
1156 switch (muic_pdata->init_data[i].addr) { 1199 switch (init_data[i].addr) {
1157 case MAX77693_MUIC_REG_INTMASK1: 1200 case MAX77693_MUIC_REG_INTMASK1:
1158 irq_src = MUIC_INT1; 1201 irq_src = MUIC_INT1;
1159 break; 1202 break;
@@ -1167,22 +1210,40 @@ static int max77693_muic_probe(struct platform_device *pdev)
1167 1210
1168 if (irq_src < MAX77693_IRQ_GROUP_NR) 1211 if (irq_src < MAX77693_IRQ_GROUP_NR)
1169 info->max77693->irq_masks_cur[irq_src] 1212 info->max77693->irq_masks_cur[irq_src]
1170 = muic_pdata->init_data[i].data; 1213 = init_data[i].data;
1171 } 1214 }
1172 1215
1173 /* 1216 if (pdata->muic_data) {
1174 * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB 1217 struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
1175 * h/w path of COMP2/COMN1 on CONTROL1 register.
1176 */
1177 if (muic_pdata->path_uart)
1178 info->path_uart = muic_pdata->path_uart;
1179 else
1180 info->path_uart = CONTROL1_SW_UART;
1181 1218
1182 if (muic_pdata->path_usb) 1219 /*
1183 info->path_usb = muic_pdata->path_usb; 1220 * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
1184 else 1221 * h/w path of COMP2/COMN1 on CONTROL1 register.
1222 */
1223 if (muic_pdata->path_uart)
1224 info->path_uart = muic_pdata->path_uart;
1225 else
1226 info->path_uart = CONTROL1_SW_UART;
1227
1228 if (muic_pdata->path_usb)
1229 info->path_usb = muic_pdata->path_usb;
1230 else
1231 info->path_usb = CONTROL1_SW_USB;
1232
1233 /*
1234 * Default delay time for detecting cable state
1235 * after a certain time.
1236 */
1237 if (muic_pdata->detcable_delay_ms)
1238 delay_jiffies =
1239 msecs_to_jiffies(muic_pdata->detcable_delay_ms);
1240 else
1241 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
1242 } else {
1185 info->path_usb = CONTROL1_SW_USB; 1243 info->path_usb = CONTROL1_SW_USB;
1244 info->path_uart = CONTROL1_SW_UART;
1245 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
1246 }
1186 1247
1187 /* Set initial path for UART */ 1248 /* Set initial path for UART */
1188 max77693_muic_set_path(info, info->path_uart, true); 1249 max77693_muic_set_path(info, info->path_uart, true);
@@ -1208,10 +1269,6 @@ static int max77693_muic_probe(struct platform_device *pdev)
1208 * driver should notify cable state to upper layer. 1269 * driver should notify cable state to upper layer.
1209 */ 1270 */
1210 INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq); 1271 INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq);
1211 if (muic_pdata->detcable_delay_ms)
1212 delay_jiffies = msecs_to_jiffies(muic_pdata->detcable_delay_ms);
1213 else
1214 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
1215 schedule_delayed_work(&info->wq_detcable, delay_jiffies); 1272 schedule_delayed_work(&info->wq_detcable, delay_jiffies);
1216 1273
1217 return ret; 1274 return ret;
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index e636d950ad6c..69641bcae325 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -712,29 +712,45 @@ static int max8997_muic_probe(struct platform_device *pdev)
712 goto err_irq; 712 goto err_irq;
713 } 713 }
714 714
715 /* Initialize registers according to platform data */
716 if (pdata->muic_pdata) { 715 if (pdata->muic_pdata) {
717 struct max8997_muic_platform_data *mdata = info->muic_pdata; 716 struct max8997_muic_platform_data *muic_pdata
718 717 = pdata->muic_pdata;
719 for (i = 0; i < mdata->num_init_data; i++) { 718
720 max8997_write_reg(info->muic, mdata->init_data[i].addr, 719 /* Initialize registers according to platform data */
721 mdata->init_data[i].data); 720 for (i = 0; i < muic_pdata->num_init_data; i++) {
721 max8997_write_reg(info->muic,
722 muic_pdata->init_data[i].addr,
723 muic_pdata->init_data[i].data);
722 } 724 }
723 }
724 725
725 /* 726 /*
726 * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB 727 * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
727 * h/w path of COMP2/COMN1 on CONTROL1 register. 728 * h/w path of COMP2/COMN1 on CONTROL1 register.
728 */ 729 */
729 if (pdata->muic_pdata->path_uart) 730 if (muic_pdata->path_uart)
730 info->path_uart = pdata->muic_pdata->path_uart; 731 info->path_uart = muic_pdata->path_uart;
731 else 732 else
732 info->path_uart = CONTROL1_SW_UART; 733 info->path_uart = CONTROL1_SW_UART;
733 734
734 if (pdata->muic_pdata->path_usb) 735 if (muic_pdata->path_usb)
735 info->path_usb = pdata->muic_pdata->path_usb; 736 info->path_usb = muic_pdata->path_usb;
736 else 737 else
738 info->path_usb = CONTROL1_SW_USB;
739
740 /*
741 * Default delay time for detecting cable state
742 * after a certain time.
743 */
744 if (muic_pdata->detcable_delay_ms)
745 delay_jiffies =
746 msecs_to_jiffies(muic_pdata->detcable_delay_ms);
747 else
748 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
749 } else {
750 info->path_uart = CONTROL1_SW_UART;
737 info->path_usb = CONTROL1_SW_USB; 751 info->path_usb = CONTROL1_SW_USB;
752 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
753 }
738 754
739 /* Set initial path for UART */ 755 /* Set initial path for UART */
740 max8997_muic_set_path(info, info->path_uart, true); 756 max8997_muic_set_path(info, info->path_uart, true);
@@ -751,10 +767,6 @@ static int max8997_muic_probe(struct platform_device *pdev)
751 * driver should notify cable state to upper layer. 767 * driver should notify cable state to upper layer.
752 */ 768 */
753 INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq); 769 INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq);
754 if (pdata->muic_pdata->detcable_delay_ms)
755 delay_jiffies = msecs_to_jiffies(pdata->muic_pdata->detcable_delay_ms);
756 else
757 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
758 schedule_delayed_work(&info->wq_detcable, delay_jiffies); 770 schedule_delayed_work(&info->wq_detcable, delay_jiffies);
759 771
760 return 0; 772 return 0;
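Both extcon probe functions now fall back to built-in defaults when no MUIC platform data is supplied, instead of dereferencing a possibly NULL muic_data pointer. The shape of that fallback, reduced to a standalone sketch (the struct layout and register values are made up for illustration):

#include <stdio.h>

struct init_entry {
        int addr, data;
};

static const struct init_entry default_init_data[] = {
        { 0x01, 0x08 },
        { 0x02, 0x01 },
};

struct muic_platform_data {
        const struct init_entry *init_data;
        int num_init_data;
};

int main(void)
{
        struct muic_platform_data *muic_pdata = NULL;   /* pretend no platform data */
        const struct init_entry *init_data;
        int num_init_data, i;

        if (muic_pdata) {
                init_data = muic_pdata->init_data;
                num_init_data = muic_pdata->num_init_data;
        } else {
                init_data = default_init_data;
                num_init_data = sizeof(default_init_data) / sizeof(default_init_data[0]);
        }

        for (i = 0; i < num_init_data; i++)
                printf("write reg 0x%02x = 0x%02x\n", init_data[i].addr, init_data[i].data);
        return 0;
}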
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 9b00072a020f..42c759a4d047 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -53,6 +53,24 @@ config EFI_VARS
53 Subsequent efibootmgr releases may be found at: 53 Subsequent efibootmgr releases may be found at:
54 <http://linux.dell.com/efibootmgr> 54 <http://linux.dell.com/efibootmgr>
55 55
56config EFI_VARS_PSTORE
57 bool "Register efivars backend for pstore"
58 depends on EFI_VARS && PSTORE
59 default y
60 help
61 Say Y here to enable the use of efivars as a backend to pstore. This
62 will allow writing console messages, crash dumps, or anything
63 else supported by pstore to EFI variables.
64
65config EFI_VARS_PSTORE_DEFAULT_DISABLE
66 bool "Disable using efivars as a pstore backend by default"
67 depends on EFI_VARS_PSTORE
68 default n
69 help
70 Saying Y here will disable the use of efivars as a storage
71 backend for pstore by default. This setting can be overridden
72 using the efivars module's pstore_disable parameter.
73
56config EFI_PCDP 74config EFI_PCDP
57 bool "Console device selection via EFI PCDP or HCDP table" 75 bool "Console device selection via EFI PCDP or HCDP table"
58 depends on ACPI && EFI && IA64 76 depends on ACPI && EFI && IA64
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index fe62aa392239..7acafb80fd4c 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -103,6 +103,11 @@ MODULE_VERSION(EFIVARS_VERSION);
103 */ 103 */
104#define GUID_LEN 36 104#define GUID_LEN 36
105 105
106static bool efivars_pstore_disable =
107 IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
108
109module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
110
106/* 111/*
107 * The maximum size of VariableName + Data = 1024 112 * The maximum size of VariableName + Data = 1024
108 * Therefore, it's reasonable to save that much 113 * Therefore, it's reasonable to save that much
@@ -165,6 +170,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
165 170
166static void efivar_update_sysfs_entries(struct work_struct *); 171static void efivar_update_sysfs_entries(struct work_struct *);
167static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries); 172static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
173static bool efivar_wq_enabled = true;
168 174
169/* Return the number of unicode characters in data */ 175/* Return the number of unicode characters in data */
170static unsigned long 176static unsigned long
@@ -1309,9 +1315,7 @@ static const struct inode_operations efivarfs_dir_inode_operations = {
1309 .create = efivarfs_create, 1315 .create = efivarfs_create,
1310}; 1316};
1311 1317
1312static struct pstore_info efi_pstore_info; 1318#ifdef CONFIG_EFI_VARS_PSTORE
1313
1314#ifdef CONFIG_PSTORE
1315 1319
1316static int efi_pstore_open(struct pstore_info *psi) 1320static int efi_pstore_open(struct pstore_info *psi)
1317{ 1321{
@@ -1441,7 +1445,7 @@ static int efi_pstore_write(enum pstore_type_id type,
1441 1445
1442 spin_unlock_irqrestore(&efivars->lock, flags); 1446 spin_unlock_irqrestore(&efivars->lock, flags);
1443 1447
1444 if (reason == KMSG_DUMP_OOPS) 1448 if (reason == KMSG_DUMP_OOPS && efivar_wq_enabled)
1445 schedule_work(&efivar_work); 1449 schedule_work(&efivar_work);
1446 1450
1447 *id = part; 1451 *id = part;
@@ -1514,38 +1518,6 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
1514 1518
1515 return 0; 1519 return 0;
1516} 1520}
1517#else
1518static int efi_pstore_open(struct pstore_info *psi)
1519{
1520 return 0;
1521}
1522
1523static int efi_pstore_close(struct pstore_info *psi)
1524{
1525 return 0;
1526}
1527
1528static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, int *count,
1529 struct timespec *timespec,
1530 char **buf, struct pstore_info *psi)
1531{
1532 return -1;
1533}
1534
1535static int efi_pstore_write(enum pstore_type_id type,
1536 enum kmsg_dump_reason reason, u64 *id,
1537 unsigned int part, int count, size_t size,
1538 struct pstore_info *psi)
1539{
1540 return 0;
1541}
1542
1543static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
1544 struct timespec time, struct pstore_info *psi)
1545{
1546 return 0;
1547}
1548#endif
1549 1521
1550static struct pstore_info efi_pstore_info = { 1522static struct pstore_info efi_pstore_info = {
1551 .owner = THIS_MODULE, 1523 .owner = THIS_MODULE,
@@ -1557,6 +1529,24 @@ static struct pstore_info efi_pstore_info = {
1557 .erase = efi_pstore_erase, 1529 .erase = efi_pstore_erase,
1558}; 1530};
1559 1531
1532static void efivar_pstore_register(struct efivars *efivars)
1533{
1534 efivars->efi_pstore_info = efi_pstore_info;
1535 efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
1536 if (efivars->efi_pstore_info.buf) {
1537 efivars->efi_pstore_info.bufsize = 1024;
1538 efivars->efi_pstore_info.data = efivars;
1539 spin_lock_init(&efivars->efi_pstore_info.buf_lock);
1540 pstore_register(&efivars->efi_pstore_info);
1541 }
1542}
1543#else
1544static void efivar_pstore_register(struct efivars *efivars)
1545{
1546 return;
1547}
1548#endif
1549
1560static ssize_t efivar_create(struct file *filp, struct kobject *kobj, 1550static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
1561 struct bin_attribute *bin_attr, 1551 struct bin_attribute *bin_attr,
1562 char *buf, loff_t pos, size_t count) 1552 char *buf, loff_t pos, size_t count)
@@ -1716,6 +1706,31 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
1716 return found; 1706 return found;
1717} 1707}
1718 1708
1709/*
1710 * Returns the size of variable_name, in bytes, including the
1711 * terminating NULL character, or variable_name_size if no NULL
1712 * character is found among the first variable_name_size bytes.
1713 */
1714static unsigned long var_name_strnsize(efi_char16_t *variable_name,
1715 unsigned long variable_name_size)
1716{
1717 unsigned long len;
1718 efi_char16_t c;
1719
1720 /*
1721 * The variable name is, by definition, a NULL-terminated
1722 * string, so make absolutely sure that variable_name_size is
1723 * the value we expect it to be. If not, return the real size.
1724 */
1725 for (len = 2; len <= variable_name_size; len += sizeof(c)) {
1726 c = variable_name[(len / sizeof(c)) - 1];
1727 if (!c)
1728 break;
1729 }
1730
1731 return min(len, variable_name_size);
1732}
1733
1719static void efivar_update_sysfs_entries(struct work_struct *work) 1734static void efivar_update_sysfs_entries(struct work_struct *work)
1720{ 1735{
1721 struct efivars *efivars = &__efivars; 1736 struct efivars *efivars = &__efivars;
@@ -1756,10 +1771,13 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
1756 if (!found) { 1771 if (!found) {
1757 kfree(variable_name); 1772 kfree(variable_name);
1758 break; 1773 break;
1759 } else 1774 } else {
1775 variable_name_size = var_name_strnsize(variable_name,
1776 variable_name_size);
1760 efivar_create_sysfs_entry(efivars, 1777 efivar_create_sysfs_entry(efivars,
1761 variable_name_size, 1778 variable_name_size,
1762 variable_name, &vendor); 1779 variable_name, &vendor);
1780 }
1763 } 1781 }
1764} 1782}
1765 1783
@@ -1958,6 +1976,35 @@ void unregister_efivars(struct efivars *efivars)
1958} 1976}
1959EXPORT_SYMBOL_GPL(unregister_efivars); 1977EXPORT_SYMBOL_GPL(unregister_efivars);
1960 1978
1979/*
1980 * Print a warning when duplicate EFI variables are encountered and
1981 * disable the sysfs workqueue since the firmware is buggy.
1982 */
1983static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
1984 unsigned long len16)
1985{
1986 size_t i, len8 = len16 / sizeof(efi_char16_t);
1987 char *s8;
1988
1989 /*
1990 * Disable the workqueue since the algorithm it uses for
1991 * detecting new variables won't work with this buggy
1992 * implementation of GetNextVariableName().
1993 */
1994 efivar_wq_enabled = false;
1995
1996 s8 = kzalloc(len8, GFP_KERNEL);
1997 if (!s8)
1998 return;
1999
2000 for (i = 0; i < len8; i++)
2001 s8[i] = s16[i];
2002
2003 printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
2004 s8, vendor_guid);
2005 kfree(s8);
2006}
2007
1961int register_efivars(struct efivars *efivars, 2008int register_efivars(struct efivars *efivars,
1962 const struct efivar_operations *ops, 2009 const struct efivar_operations *ops,
1963 struct kobject *parent_kobj) 2010 struct kobject *parent_kobj)
@@ -2006,6 +2053,24 @@ int register_efivars(struct efivars *efivars,
2006 &vendor_guid); 2053 &vendor_guid);
2007 switch (status) { 2054 switch (status) {
2008 case EFI_SUCCESS: 2055 case EFI_SUCCESS:
2056 variable_name_size = var_name_strnsize(variable_name,
2057 variable_name_size);
2058
2059 /*
2060 * Some firmware implementations return the
2061 * same variable name on multiple calls to
2062 * get_next_variable(). Terminate the loop
2063 * immediately as there is no guarantee that
2064 * we'll ever see a different variable name,
2065 * and may end up looping here forever.
2066 */
2067 if (variable_is_present(variable_name, &vendor_guid)) {
2068 dup_variable_bug(variable_name, &vendor_guid,
2069 variable_name_size);
2070 status = EFI_NOT_FOUND;
2071 break;
2072 }
2073
2009 efivar_create_sysfs_entry(efivars, 2074 efivar_create_sysfs_entry(efivars,
2010 variable_name_size, 2075 variable_name_size,
2011 variable_name, 2076 variable_name,
@@ -2025,15 +2090,8 @@ int register_efivars(struct efivars *efivars,
2025 if (error) 2090 if (error)
2026 unregister_efivars(efivars); 2091 unregister_efivars(efivars);
2027 2092
2028 efivars->efi_pstore_info = efi_pstore_info; 2093 if (!efivars_pstore_disable)
2029 2094 efivar_pstore_register(efivars);
2030 efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
2031 if (efivars->efi_pstore_info.buf) {
2032 efivars->efi_pstore_info.bufsize = 1024;
2033 efivars->efi_pstore_info.data = efivars;
2034 spin_lock_init(&efivars->efi_pstore_info.buf_lock);
2035 pstore_register(&efivars->efi_pstore_info);
2036 }
2037 2095
2038 register_filesystem(&efivarfs_type); 2096 register_filesystem(&efivarfs_type);
2039 2097
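Annotation: the two efivars helpers above guard against firmware that reports a bogus name length or repeats names. var_name_strnsize() walks the UTF-16 name two bytes at a time, stops at the first NUL code unit, and never trusts more than the size the firmware reported. A minimal user-space sketch of the same computation, with uint16_t standing in for efi_char16_t and the 1024-byte reported size being an invented example:

    #include <stdint.h>
    #include <stdio.h>

    /* Size in bytes of a UTF-16 name, including the 16-bit terminator,
     * capped at the size the caller (here: the firmware) reported. */
    static unsigned long name_strnsize(const uint16_t *name, unsigned long size)
    {
        unsigned long len;

        for (len = 2; len <= size; len += sizeof(uint16_t)) {
            if (!name[len / sizeof(uint16_t) - 1])
                break;
        }
        return len < size ? len : size;
    }

    int main(void)
    {
        uint16_t boot[] = { 'B', 'o', 'o', 't', 'O', 'r', 'd', 'e', 'r', 0 };

        /* Firmware claims 1024 bytes; the real size is 20 bytes. */
        printf("%lu\n", name_strnsize(boot, 1024));   /* prints 20 */
        return 0;
    }

register_efivars() then feeds the trimmed size into the duplicate check, which is why an over-long reported size can no longer defeat variable_is_present().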
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index f9dbd503fc40..de3c317bd3e2 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -214,7 +214,7 @@ static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)
214 * If it can't be trusted, assume that the pin can be used as a GPIO. 214 * If it can't be trusted, assume that the pin can be used as a GPIO.
215 */ 215 */
216 if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f))) 216 if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f)))
217 return 1; 217 return 0;
218 218
219 return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV; 219 return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV;
220} 220}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 24059462c87f..9391cf16e990 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -575,7 +575,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
575 chip->gpio_chip.ngpio, 575 chip->gpio_chip.ngpio,
576 irq_base, 576 irq_base,
577 &pca953x_irq_simple_ops, 577 &pca953x_irq_simple_ops,
578 NULL); 578 chip);
579 if (!chip->domain) 579 if (!chip->domain)
580 return -ENODEV; 580 return -ENODEV;
581 581
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 770476a9da87..3ce5bc38ac31 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -307,11 +307,15 @@ static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = {
307 .xlate = irq_domain_xlate_twocell, 307 .xlate = irq_domain_xlate_twocell,
308}; 308};
309 309
310static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio) 310static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio,
311 struct device_node *np)
311{ 312{
312 int base = stmpe_gpio->irq_base; 313 int base = 0;
313 314
314 stmpe_gpio->domain = irq_domain_add_simple(NULL, 315 if (!np)
316 base = stmpe_gpio->irq_base;
317
318 stmpe_gpio->domain = irq_domain_add_simple(np,
315 stmpe_gpio->chip.ngpio, base, 319 stmpe_gpio->chip.ngpio, base,
316 &stmpe_gpio_irq_simple_ops, stmpe_gpio); 320 &stmpe_gpio_irq_simple_ops, stmpe_gpio);
317 if (!stmpe_gpio->domain) { 321 if (!stmpe_gpio->domain) {
@@ -346,6 +350,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
346 stmpe_gpio->chip = template_chip; 350 stmpe_gpio->chip = template_chip;
347 stmpe_gpio->chip.ngpio = stmpe->num_gpios; 351 stmpe_gpio->chip.ngpio = stmpe->num_gpios;
348 stmpe_gpio->chip.dev = &pdev->dev; 352 stmpe_gpio->chip.dev = &pdev->dev;
353#ifdef CONFIG_OF
354 stmpe_gpio->chip.of_node = np;
355#endif
349 stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1; 356 stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1;
350 357
351 if (pdata) 358 if (pdata)
@@ -366,7 +373,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
366 goto out_free; 373 goto out_free;
367 374
368 if (irq >= 0) { 375 if (irq >= 0) {
369 ret = stmpe_gpio_irq_init(stmpe_gpio); 376 ret = stmpe_gpio_irq_init(stmpe_gpio, np);
370 if (ret) 377 if (ret)
371 goto out_disable; 378 goto out_disable;
372 379
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index a71a54a3e3f7..5150df6cba08 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -193,7 +193,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
193 if (!np) 193 if (!np)
194 return; 194 return;
195 195
196 do { 196 for (;; index++) {
197 ret = of_parse_phandle_with_args(np, "gpio-ranges", 197 ret = of_parse_phandle_with_args(np, "gpio-ranges",
198 "#gpio-range-cells", index, &pinspec); 198 "#gpio-range-cells", index, &pinspec);
199 if (ret) 199 if (ret)
@@ -222,8 +222,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
222 222
223 if (ret) 223 if (ret)
224 break; 224 break;
225 225 }
226 } while (index++);
227} 226}
228 227
229#else 228#else
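Annotation: the gpiolib-of hunk above is a classic post-increment trap. Assuming index starts at zero (as it does earlier in of_gpiochip_add_pin_range()), "do { ... } while (index++)" tests the old value 0 after the first pass and exits, so only the first gpio-ranges entry was ever parsed. A stand-alone sketch of the two loop shapes, with a hypothetical parse_entry() standing in for of_parse_phandle_with_args():

    #include <stdio.h>
    #include <stdbool.h>

    #define NUM_ENTRIES 3

    /* Pretend there are three gpio-ranges entries; fail past the end. */
    static bool parse_entry(int index) { return index < NUM_ENTRIES; }

    int main(void)
    {
        int index, parsed;

        /* Old form: while (index++) tests the old value 0 and stops. */
        parsed = 0;
        index = 0;
        do {
            if (!parse_entry(index))
                break;
            parsed++;
        } while (index++);
        printf("do/while parsed %d entry(ies)\n", parsed);   /* 1 */

        /* New form: keep going until parse_entry() fails. */
        parsed = 0;
        for (index = 0; ; index++) {
            if (!parse_entry(index))
                break;
            parsed++;
        }
        printf("for (;;)  parsed %d entry(ies)\n", parsed);   /* 3 */
        return 0;
    }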
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 792c3e3795ca..dd64a06dc5b4 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2326,7 +2326,6 @@ int drm_mode_addfb(struct drm_device *dev,
2326 fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); 2326 fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
2327 if (IS_ERR(fb)) { 2327 if (IS_ERR(fb)) {
2328 DRM_DEBUG_KMS("could not create framebuffer\n"); 2328 DRM_DEBUG_KMS("could not create framebuffer\n");
2329 drm_modeset_unlock_all(dev);
2330 return PTR_ERR(fb); 2329 return PTR_ERR(fb);
2331 } 2330 }
2332 2331
@@ -2506,7 +2505,6 @@ int drm_mode_addfb2(struct drm_device *dev,
2506 fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); 2505 fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
2507 if (IS_ERR(fb)) { 2506 if (IS_ERR(fb)) {
2508 DRM_DEBUG_KMS("could not create framebuffer\n"); 2507 DRM_DEBUG_KMS("could not create framebuffer\n");
2509 drm_modeset_unlock_all(dev);
2510 return PTR_ERR(fb); 2508 return PTR_ERR(fb);
2511 } 2509 }
2512 2510
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 59d6b9bf204b..892ff9f95975 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1544,10 +1544,10 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1544 if (!fb_helper->fb) 1544 if (!fb_helper->fb)
1545 return 0; 1545 return 0;
1546 1546
1547 drm_modeset_lock_all(dev); 1547 mutex_lock(&fb_helper->dev->mode_config.mutex);
1548 if (!drm_fb_helper_is_bound(fb_helper)) { 1548 if (!drm_fb_helper_is_bound(fb_helper)) {
1549 fb_helper->delayed_hotplug = true; 1549 fb_helper->delayed_hotplug = true;
1550 drm_modeset_unlock_all(dev); 1550 mutex_unlock(&fb_helper->dev->mode_config.mutex);
1551 return 0; 1551 return 0;
1552 } 1552 }
1553 DRM_DEBUG_KMS("\n"); 1553 DRM_DEBUG_KMS("\n");
@@ -1558,9 +1558,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1558 1558
1559 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, 1559 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
1560 max_height); 1560 max_height);
1561 mutex_unlock(&fb_helper->dev->mode_config.mutex);
1562
1563 drm_modeset_lock_all(dev);
1561 drm_setup_crtcs(fb_helper); 1564 drm_setup_crtcs(fb_helper);
1562 drm_modeset_unlock_all(dev); 1565 drm_modeset_unlock_all(dev);
1563
1564 drm_fb_helper_set_par(fb_helper->fbdev); 1566 drm_fb_helper_set_par(fb_helper->fbdev);
1565 1567
1566 return 0; 1568 return 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 13fdcd10a605..429e07d0b0f1 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -123,6 +123,7 @@ int drm_open(struct inode *inode, struct file *filp)
123 int retcode = 0; 123 int retcode = 0;
124 int need_setup = 0; 124 int need_setup = 0;
125 struct address_space *old_mapping; 125 struct address_space *old_mapping;
126 struct address_space *old_imapping;
126 127
127 minor = idr_find(&drm_minors_idr, minor_id); 128 minor = idr_find(&drm_minors_idr, minor_id);
128 if (!minor) 129 if (!minor)
@@ -137,6 +138,7 @@ int drm_open(struct inode *inode, struct file *filp)
137 if (!dev->open_count++) 138 if (!dev->open_count++)
138 need_setup = 1; 139 need_setup = 1;
139 mutex_lock(&dev->struct_mutex); 140 mutex_lock(&dev->struct_mutex);
141 old_imapping = inode->i_mapping;
140 old_mapping = dev->dev_mapping; 142 old_mapping = dev->dev_mapping;
141 if (old_mapping == NULL) 143 if (old_mapping == NULL)
142 dev->dev_mapping = &inode->i_data; 144 dev->dev_mapping = &inode->i_data;
@@ -159,8 +161,8 @@ int drm_open(struct inode *inode, struct file *filp)
159 161
160err_undo: 162err_undo:
161 mutex_lock(&dev->struct_mutex); 163 mutex_lock(&dev->struct_mutex);
162 filp->f_mapping = old_mapping; 164 filp->f_mapping = old_imapping;
163 inode->i_mapping = old_mapping; 165 inode->i_mapping = old_imapping;
164 iput(container_of(dev->dev_mapping, struct inode, i_data)); 166 iput(container_of(dev->dev_mapping, struct inode, i_data));
165 dev->dev_mapping = old_mapping; 167 dev->dev_mapping = old_mapping;
166 mutex_unlock(&dev->struct_mutex); 168 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 36493ce71f9a..98cc14725ba9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -38,11 +38,12 @@
38/* position control register for hardware window 0, 2 ~ 4.*/ 38/* position control register for hardware window 0, 2 ~ 4.*/
39#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16) 39#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
40#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16) 40#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16)
41/* size control register for hardware window 0. */ 41/*
42#define VIDOSD_C_SIZE_W0 (VIDOSD_BASE + 0x08) 42 * size control register for hardware windows 0 and alpha control register
43/* alpha control register for hardware window 1 ~ 4. */ 43 * for hardware windows 1 ~ 4
44#define VIDOSD_C(win) (VIDOSD_BASE + 0x18 + (win) * 16) 44 */
45/* size control register for hardware window 1 ~ 4. */ 45#define VIDOSD_C(win) (VIDOSD_BASE + 0x08 + (win) * 16)
46/* size control register for hardware windows 1 ~ 2. */
46#define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16) 47#define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16)
47 48
48#define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8) 49#define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8)
@@ -50,9 +51,9 @@
50#define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4) 51#define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4)
51 52
52/* color key control register for hardware window 1 ~ 4. */ 53/* color key control register for hardware window 1 ~ 4. */
53#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + (x * 8)) 54#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + ((x - 1) * 8))
54/* color key value register for hardware window 1 ~ 4. */ 55/* color key value register for hardware window 1 ~ 4. */
55#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + (x * 8)) 56#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8))
56 57
57/* FIMD has totally five hardware windows. */ 58/* FIMD has totally five hardware windows. */
58#define WINDOWS_NR 5 59#define WINDOWS_NR 5
@@ -109,9 +110,9 @@ struct fimd_context {
109 110
110#ifdef CONFIG_OF 111#ifdef CONFIG_OF
111static const struct of_device_id fimd_driver_dt_match[] = { 112static const struct of_device_id fimd_driver_dt_match[] = {
112 { .compatible = "samsung,exynos4-fimd", 113 { .compatible = "samsung,exynos4210-fimd",
113 .data = &exynos4_fimd_driver_data }, 114 .data = &exynos4_fimd_driver_data },
114 { .compatible = "samsung,exynos5-fimd", 115 { .compatible = "samsung,exynos5250-fimd",
115 .data = &exynos5_fimd_driver_data }, 116 .data = &exynos5_fimd_driver_data },
116 {}, 117 {},
117}; 118};
@@ -581,7 +582,7 @@ static void fimd_win_commit(struct device *dev, int zpos)
581 if (win != 3 && win != 4) { 582 if (win != 3 && win != 4) {
582 u32 offset = VIDOSD_D(win); 583 u32 offset = VIDOSD_D(win);
583 if (win == 0) 584 if (win == 0)
584 offset = VIDOSD_C_SIZE_W0; 585 offset = VIDOSD_C(win);
585 val = win_data->ovl_width * win_data->ovl_height; 586 val = win_data->ovl_width * win_data->ovl_height;
586 writel(val, ctx->regs + offset); 587 writel(val, ctx->regs + offset);
587 588
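Annotation: the WKEYCON fix above is pure index arithmetic. The color-key registers belong to hardware windows 1-4 but are packed from the base at an 8-byte stride, so window 1 must land on slot 0, not slot 1. A quick sketch with an illustrative base value (the real WKEYCON0 offset comes from the SoC register headers):

    #include <stdio.h>

    /* Illustrative base; the real WKEYCON0 offset comes from the SoC headers. */
    #define KEYCON_BASE  0x140

    #define KEYCON_OLD(x)  (KEYCON_BASE + ((x) * 8))        /* window 1 -> slot 1 */
    #define KEYCON_NEW(x)  (KEYCON_BASE + (((x) - 1) * 8))  /* window 1 -> slot 0 */

    int main(void)
    {
        for (int win = 1; win <= 4; win++)
            printf("win %d: old 0x%03x  new 0x%03x\n",
                   win, KEYCON_OLD(win), KEYCON_NEW(win));
        return 0;
    }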
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3b0da0378acf..47a493c8a71f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -48,8 +48,14 @@
48 48
49/* registers for base address */ 49/* registers for base address */
50#define G2D_SRC_BASE_ADDR 0x0304 50#define G2D_SRC_BASE_ADDR 0x0304
51#define G2D_SRC_COLOR_MODE 0x030C
52#define G2D_SRC_LEFT_TOP 0x0310
53#define G2D_SRC_RIGHT_BOTTOM 0x0314
51#define G2D_SRC_PLANE2_BASE_ADDR 0x0318 54#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
52#define G2D_DST_BASE_ADDR 0x0404 55#define G2D_DST_BASE_ADDR 0x0404
56#define G2D_DST_COLOR_MODE 0x040C
57#define G2D_DST_LEFT_TOP 0x0410
58#define G2D_DST_RIGHT_BOTTOM 0x0414
53#define G2D_DST_PLANE2_BASE_ADDR 0x0418 59#define G2D_DST_PLANE2_BASE_ADDR 0x0418
54#define G2D_PAT_BASE_ADDR 0x0500 60#define G2D_PAT_BASE_ADDR 0x0500
55#define G2D_MSK_BASE_ADDR 0x0520 61#define G2D_MSK_BASE_ADDR 0x0520
@@ -82,7 +88,7 @@
82#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17 88#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
83 89
84/* G2D_DMA_HOLD_CMD */ 90/* G2D_DMA_HOLD_CMD */
85#define G2D_USET_HOLD (1 << 2) 91#define G2D_USER_HOLD (1 << 2)
86#define G2D_LIST_HOLD (1 << 1) 92#define G2D_LIST_HOLD (1 << 1)
87#define G2D_BITBLT_HOLD (1 << 0) 93#define G2D_BITBLT_HOLD (1 << 0)
88 94
@@ -91,13 +97,27 @@
91#define G2D_START_NHOLT (1 << 1) 97#define G2D_START_NHOLT (1 << 1)
92#define G2D_START_BITBLT (1 << 0) 98#define G2D_START_BITBLT (1 << 0)
93 99
100/* buffer color format */
101#define G2D_FMT_XRGB8888 0
102#define G2D_FMT_ARGB8888 1
103#define G2D_FMT_RGB565 2
104#define G2D_FMT_XRGB1555 3
105#define G2D_FMT_ARGB1555 4
106#define G2D_FMT_XRGB4444 5
107#define G2D_FMT_ARGB4444 6
108#define G2D_FMT_PACKED_RGB888 7
109#define G2D_FMT_A8 11
110#define G2D_FMT_L8 12
111
112/* buffer valid length */
113#define G2D_LEN_MIN 1
114#define G2D_LEN_MAX 8000
115
94#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4) 116#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
95#define G2D_CMDLIST_NUM 64 117#define G2D_CMDLIST_NUM 64
96#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) 118#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
97#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2) 119#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
98 120
99#define MAX_BUF_ADDR_NR 6
100
101/* maximum buffer pool size of userptr is 64MB as default */ 121/* maximum buffer pool size of userptr is 64MB as default */
102#define MAX_POOL (64 * 1024 * 1024) 122#define MAX_POOL (64 * 1024 * 1024)
103 123
@@ -106,6 +126,17 @@ enum {
106 BUF_TYPE_USERPTR, 126 BUF_TYPE_USERPTR,
107}; 127};
108 128
129enum g2d_reg_type {
130 REG_TYPE_NONE = -1,
131 REG_TYPE_SRC,
132 REG_TYPE_SRC_PLANE2,
133 REG_TYPE_DST,
134 REG_TYPE_DST_PLANE2,
135 REG_TYPE_PAT,
136 REG_TYPE_MSK,
137 MAX_REG_TYPE_NR
138};
139
109/* cmdlist data structure */ 140/* cmdlist data structure */
110struct g2d_cmdlist { 141struct g2d_cmdlist {
111 u32 head; 142 u32 head;
@@ -113,6 +144,42 @@ struct g2d_cmdlist {
113 u32 last; /* last data offset */ 144 u32 last; /* last data offset */
114}; 145};
115 146
147/*
148 * A structure of buffer description
149 *
150 * @format: color format
 151 * @left_x: the x coordinate of the top-left corner
 152 * @top_y: the y coordinate of the top-left corner
 153 * @right_x: the x coordinate of the bottom-right corner
 154 * @bottom_y: the y coordinate of the bottom-right corner
155 *
156 */
157struct g2d_buf_desc {
158 unsigned int format;
159 unsigned int left_x;
160 unsigned int top_y;
161 unsigned int right_x;
162 unsigned int bottom_y;
163};
164
165/*
166 * A structure of buffer information
167 *
168 * @map_nr: manages the number of mapped buffers
 169 * @reg_types: stores register type in the order of requested command
170 * @handles: stores buffer handle in its reg_type position
171 * @types: stores buffer type in its reg_type position
172 * @descs: stores buffer description in its reg_type position
173 *
174 */
175struct g2d_buf_info {
176 unsigned int map_nr;
177 enum g2d_reg_type reg_types[MAX_REG_TYPE_NR];
178 unsigned long handles[MAX_REG_TYPE_NR];
179 unsigned int types[MAX_REG_TYPE_NR];
180 struct g2d_buf_desc descs[MAX_REG_TYPE_NR];
181};
182
116struct drm_exynos_pending_g2d_event { 183struct drm_exynos_pending_g2d_event {
117 struct drm_pending_event base; 184 struct drm_pending_event base;
118 struct drm_exynos_g2d_event event; 185 struct drm_exynos_g2d_event event;
@@ -131,14 +198,11 @@ struct g2d_cmdlist_userptr {
131 bool in_pool; 198 bool in_pool;
132 bool out_of_list; 199 bool out_of_list;
133}; 200};
134
135struct g2d_cmdlist_node { 201struct g2d_cmdlist_node {
136 struct list_head list; 202 struct list_head list;
137 struct g2d_cmdlist *cmdlist; 203 struct g2d_cmdlist *cmdlist;
138 unsigned int map_nr;
139 unsigned long handles[MAX_BUF_ADDR_NR];
140 unsigned int obj_type[MAX_BUF_ADDR_NR];
141 dma_addr_t dma_addr; 204 dma_addr_t dma_addr;
205 struct g2d_buf_info buf_info;
142 206
143 struct drm_exynos_pending_g2d_event *event; 207 struct drm_exynos_pending_g2d_event *event;
144}; 208};
@@ -188,6 +252,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
188 struct exynos_drm_subdrv *subdrv = &g2d->subdrv; 252 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
189 int nr; 253 int nr;
190 int ret; 254 int ret;
255 struct g2d_buf_info *buf_info;
191 256
192 init_dma_attrs(&g2d->cmdlist_dma_attrs); 257 init_dma_attrs(&g2d->cmdlist_dma_attrs);
193 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); 258 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
@@ -209,11 +274,17 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
209 } 274 }
210 275
211 for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) { 276 for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
277 unsigned int i;
278
212 node[nr].cmdlist = 279 node[nr].cmdlist =
213 g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE; 280 g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
214 node[nr].dma_addr = 281 node[nr].dma_addr =
215 g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE; 282 g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
216 283
284 buf_info = &node[nr].buf_info;
285 for (i = 0; i < MAX_REG_TYPE_NR; i++)
286 buf_info->reg_types[i] = REG_TYPE_NONE;
287
217 list_add_tail(&node[nr].list, &g2d->free_cmdlist); 288 list_add_tail(&node[nr].list, &g2d->free_cmdlist);
218 } 289 }
219 290
@@ -450,7 +521,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
450 DMA_BIDIRECTIONAL); 521 DMA_BIDIRECTIONAL);
451 if (ret < 0) { 522 if (ret < 0) {
452 DRM_ERROR("failed to map sgt with dma region.\n"); 523 DRM_ERROR("failed to map sgt with dma region.\n");
453 goto err_free_sgt; 524 goto err_sg_free_table;
454 } 525 }
455 526
456 g2d_userptr->dma_addr = sgt->sgl[0].dma_address; 527 g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
@@ -467,8 +538,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
467 538
468 return &g2d_userptr->dma_addr; 539 return &g2d_userptr->dma_addr;
469 540
470err_free_sgt: 541err_sg_free_table:
471 sg_free_table(sgt); 542 sg_free_table(sgt);
543
544err_free_sgt:
472 kfree(sgt); 545 kfree(sgt);
473 sgt = NULL; 546 sgt = NULL;
474 547
@@ -506,36 +579,172 @@ static void g2d_userptr_free_all(struct drm_device *drm_dev,
506 g2d->current_pool = 0; 579 g2d->current_pool = 0;
507} 580}
508 581
582static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
583{
584 enum g2d_reg_type reg_type;
585
586 switch (reg_offset) {
587 case G2D_SRC_BASE_ADDR:
588 case G2D_SRC_COLOR_MODE:
589 case G2D_SRC_LEFT_TOP:
590 case G2D_SRC_RIGHT_BOTTOM:
591 reg_type = REG_TYPE_SRC;
592 break;
593 case G2D_SRC_PLANE2_BASE_ADDR:
594 reg_type = REG_TYPE_SRC_PLANE2;
595 break;
596 case G2D_DST_BASE_ADDR:
597 case G2D_DST_COLOR_MODE:
598 case G2D_DST_LEFT_TOP:
599 case G2D_DST_RIGHT_BOTTOM:
600 reg_type = REG_TYPE_DST;
601 break;
602 case G2D_DST_PLANE2_BASE_ADDR:
603 reg_type = REG_TYPE_DST_PLANE2;
604 break;
605 case G2D_PAT_BASE_ADDR:
606 reg_type = REG_TYPE_PAT;
607 break;
608 case G2D_MSK_BASE_ADDR:
609 reg_type = REG_TYPE_MSK;
610 break;
611 default:
612 reg_type = REG_TYPE_NONE;
613 DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
614 break;
615 };
616
617 return reg_type;
618}
619
620static unsigned long g2d_get_buf_bpp(unsigned int format)
621{
622 unsigned long bpp;
623
624 switch (format) {
625 case G2D_FMT_XRGB8888:
626 case G2D_FMT_ARGB8888:
627 bpp = 4;
628 break;
629 case G2D_FMT_RGB565:
630 case G2D_FMT_XRGB1555:
631 case G2D_FMT_ARGB1555:
632 case G2D_FMT_XRGB4444:
633 case G2D_FMT_ARGB4444:
634 bpp = 2;
635 break;
636 case G2D_FMT_PACKED_RGB888:
637 bpp = 3;
638 break;
639 default:
640 bpp = 1;
641 break;
642 }
643
644 return bpp;
645}
646
647static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
648 enum g2d_reg_type reg_type,
649 unsigned long size)
650{
651 unsigned int width, height;
652 unsigned long area;
653
654 /*
655 * check source and destination buffers only.
656 * so the others are always valid.
657 */
658 if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
659 return true;
660
661 width = buf_desc->right_x - buf_desc->left_x;
662 if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
663 DRM_ERROR("width[%u] is out of range!\n", width);
664 return false;
665 }
666
667 height = buf_desc->bottom_y - buf_desc->top_y;
668 if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
669 DRM_ERROR("height[%u] is out of range!\n", height);
670 return false;
671 }
672
673 area = (unsigned long)width * (unsigned long)height *
674 g2d_get_buf_bpp(buf_desc->format);
675 if (area > size) {
676 DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size);
677 return false;
678 }
679
680 return true;
681}
682
509static int g2d_map_cmdlist_gem(struct g2d_data *g2d, 683static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
510 struct g2d_cmdlist_node *node, 684 struct g2d_cmdlist_node *node,
511 struct drm_device *drm_dev, 685 struct drm_device *drm_dev,
512 struct drm_file *file) 686 struct drm_file *file)
513{ 687{
514 struct g2d_cmdlist *cmdlist = node->cmdlist; 688 struct g2d_cmdlist *cmdlist = node->cmdlist;
689 struct g2d_buf_info *buf_info = &node->buf_info;
515 int offset; 690 int offset;
691 int ret;
516 int i; 692 int i;
517 693
518 for (i = 0; i < node->map_nr; i++) { 694 for (i = 0; i < buf_info->map_nr; i++) {
695 struct g2d_buf_desc *buf_desc;
696 enum g2d_reg_type reg_type;
697 int reg_pos;
519 unsigned long handle; 698 unsigned long handle;
520 dma_addr_t *addr; 699 dma_addr_t *addr;
521 700
522 offset = cmdlist->last - (i * 2 + 1); 701 reg_pos = cmdlist->last - 2 * (i + 1);
523 handle = cmdlist->data[offset]; 702
703 offset = cmdlist->data[reg_pos];
704 handle = cmdlist->data[reg_pos + 1];
705
706 reg_type = g2d_get_reg_type(offset);
707 if (reg_type == REG_TYPE_NONE) {
708 ret = -EFAULT;
709 goto err;
710 }
711
712 buf_desc = &buf_info->descs[reg_type];
713
714 if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
715 unsigned long size;
716
717 size = exynos_drm_gem_get_size(drm_dev, handle, file);
718 if (!size) {
719 ret = -EFAULT;
720 goto err;
721 }
722
723 if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
724 size)) {
725 ret = -EFAULT;
726 goto err;
727 }
524 728
525 if (node->obj_type[i] == BUF_TYPE_GEM) {
526 addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, 729 addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
527 file); 730 file);
528 if (IS_ERR(addr)) { 731 if (IS_ERR(addr)) {
529 node->map_nr = i; 732 ret = -EFAULT;
530 return -EFAULT; 733 goto err;
531 } 734 }
532 } else { 735 } else {
533 struct drm_exynos_g2d_userptr g2d_userptr; 736 struct drm_exynos_g2d_userptr g2d_userptr;
534 737
535 if (copy_from_user(&g2d_userptr, (void __user *)handle, 738 if (copy_from_user(&g2d_userptr, (void __user *)handle,
536 sizeof(struct drm_exynos_g2d_userptr))) { 739 sizeof(struct drm_exynos_g2d_userptr))) {
537 node->map_nr = i; 740 ret = -EFAULT;
538 return -EFAULT; 741 goto err;
742 }
743
744 if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
745 g2d_userptr.size)) {
746 ret = -EFAULT;
747 goto err;
539 } 748 }
540 749
541 addr = g2d_userptr_get_dma_addr(drm_dev, 750 addr = g2d_userptr_get_dma_addr(drm_dev,
@@ -544,16 +753,21 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
544 file, 753 file,
545 &handle); 754 &handle);
546 if (IS_ERR(addr)) { 755 if (IS_ERR(addr)) {
547 node->map_nr = i; 756 ret = -EFAULT;
548 return -EFAULT; 757 goto err;
549 } 758 }
550 } 759 }
551 760
552 cmdlist->data[offset] = *addr; 761 cmdlist->data[reg_pos + 1] = *addr;
553 node->handles[i] = handle; 762 buf_info->reg_types[i] = reg_type;
763 buf_info->handles[reg_type] = handle;
554 } 764 }
555 765
556 return 0; 766 return 0;
767
768err:
769 buf_info->map_nr = i;
770 return ret;
557} 771}
558 772
559static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d, 773static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
@@ -561,22 +775,33 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
561 struct drm_file *filp) 775 struct drm_file *filp)
562{ 776{
563 struct exynos_drm_subdrv *subdrv = &g2d->subdrv; 777 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
778 struct g2d_buf_info *buf_info = &node->buf_info;
564 int i; 779 int i;
565 780
566 for (i = 0; i < node->map_nr; i++) { 781 for (i = 0; i < buf_info->map_nr; i++) {
567 unsigned long handle = node->handles[i]; 782 struct g2d_buf_desc *buf_desc;
783 enum g2d_reg_type reg_type;
784 unsigned long handle;
785
786 reg_type = buf_info->reg_types[i];
787
788 buf_desc = &buf_info->descs[reg_type];
789 handle = buf_info->handles[reg_type];
568 790
569 if (node->obj_type[i] == BUF_TYPE_GEM) 791 if (buf_info->types[reg_type] == BUF_TYPE_GEM)
570 exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle, 792 exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
571 filp); 793 filp);
572 else 794 else
573 g2d_userptr_put_dma_addr(subdrv->drm_dev, handle, 795 g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
574 false); 796 false);
575 797
576 node->handles[i] = 0; 798 buf_info->reg_types[i] = REG_TYPE_NONE;
799 buf_info->handles[reg_type] = 0;
800 buf_info->types[reg_type] = 0;
801 memset(buf_desc, 0x00, sizeof(*buf_desc));
577 } 802 }
578 803
579 node->map_nr = 0; 804 buf_info->map_nr = 0;
580} 805}
581 806
582static void g2d_dma_start(struct g2d_data *g2d, 807static void g2d_dma_start(struct g2d_data *g2d,
@@ -589,10 +814,6 @@ static void g2d_dma_start(struct g2d_data *g2d,
589 pm_runtime_get_sync(g2d->dev); 814 pm_runtime_get_sync(g2d->dev);
590 clk_enable(g2d->gate_clk); 815 clk_enable(g2d->gate_clk);
591 816
592 /* interrupt enable */
593 writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
594 g2d->regs + G2D_INTEN);
595
596 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); 817 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
597 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); 818 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
598} 819}
@@ -643,7 +864,6 @@ static void g2d_runqueue_worker(struct work_struct *work)
643 struct g2d_data *g2d = container_of(work, struct g2d_data, 864 struct g2d_data *g2d = container_of(work, struct g2d_data,
644 runqueue_work); 865 runqueue_work);
645 866
646
647 mutex_lock(&g2d->runqueue_mutex); 867 mutex_lock(&g2d->runqueue_mutex);
648 clk_disable(g2d->gate_clk); 868 clk_disable(g2d->gate_clk);
649 pm_runtime_put_sync(g2d->dev); 869 pm_runtime_put_sync(g2d->dev);
@@ -724,20 +944,14 @@ static int g2d_check_reg_offset(struct device *dev,
724 int i; 944 int i;
725 945
726 for (i = 0; i < nr; i++) { 946 for (i = 0; i < nr; i++) {
727 index = cmdlist->last - 2 * (i + 1); 947 struct g2d_buf_info *buf_info = &node->buf_info;
948 struct g2d_buf_desc *buf_desc;
949 enum g2d_reg_type reg_type;
950 unsigned long value;
728 951
729 if (for_addr) { 952 index = cmdlist->last - 2 * (i + 1);
730 /* check userptr buffer type. */
731 reg_offset = (cmdlist->data[index] &
732 ~0x7fffffff) >> 31;
733 if (reg_offset) {
734 node->obj_type[i] = BUF_TYPE_USERPTR;
735 cmdlist->data[index] &= ~G2D_BUF_USERPTR;
736 }
737 }
738 953
739 reg_offset = cmdlist->data[index] & ~0xfffff000; 954 reg_offset = cmdlist->data[index] & ~0xfffff000;
740
741 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) 955 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
742 goto err; 956 goto err;
743 if (reg_offset % 4) 957 if (reg_offset % 4)
@@ -753,8 +967,60 @@ static int g2d_check_reg_offset(struct device *dev,
753 if (!for_addr) 967 if (!for_addr)
754 goto err; 968 goto err;
755 969
756 if (node->obj_type[i] != BUF_TYPE_USERPTR) 970 reg_type = g2d_get_reg_type(reg_offset);
757 node->obj_type[i] = BUF_TYPE_GEM; 971 if (reg_type == REG_TYPE_NONE)
972 goto err;
973
974 /* check userptr buffer type. */
975 if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
976 buf_info->types[reg_type] = BUF_TYPE_USERPTR;
977 cmdlist->data[index] &= ~G2D_BUF_USERPTR;
978 } else
979 buf_info->types[reg_type] = BUF_TYPE_GEM;
980 break;
981 case G2D_SRC_COLOR_MODE:
982 case G2D_DST_COLOR_MODE:
983 if (for_addr)
984 goto err;
985
986 reg_type = g2d_get_reg_type(reg_offset);
987 if (reg_type == REG_TYPE_NONE)
988 goto err;
989
990 buf_desc = &buf_info->descs[reg_type];
991 value = cmdlist->data[index + 1];
992
993 buf_desc->format = value & 0xf;
994 break;
995 case G2D_SRC_LEFT_TOP:
996 case G2D_DST_LEFT_TOP:
997 if (for_addr)
998 goto err;
999
1000 reg_type = g2d_get_reg_type(reg_offset);
1001 if (reg_type == REG_TYPE_NONE)
1002 goto err;
1003
1004 buf_desc = &buf_info->descs[reg_type];
1005 value = cmdlist->data[index + 1];
1006
1007 buf_desc->left_x = value & 0x1fff;
1008 buf_desc->top_y = (value & 0x1fff0000) >> 16;
1009 break;
1010 case G2D_SRC_RIGHT_BOTTOM:
1011 case G2D_DST_RIGHT_BOTTOM:
1012 if (for_addr)
1013 goto err;
1014
1015 reg_type = g2d_get_reg_type(reg_offset);
1016 if (reg_type == REG_TYPE_NONE)
1017 goto err;
1018
1019 buf_desc = &buf_info->descs[reg_type];
1020 value = cmdlist->data[index + 1];
1021
1022 buf_desc->right_x = value & 0x1fff;
1023 buf_desc->bottom_y = (value & 0x1fff0000) >> 16;
758 break; 1024 break;
759 default: 1025 default:
760 if (for_addr) 1026 if (for_addr)
@@ -860,9 +1126,23 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
860 cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR; 1126 cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
861 cmdlist->data[cmdlist->last++] = 0; 1127 cmdlist->data[cmdlist->last++] = 0;
862 1128
1129 /*
 1130 * The 'LIST_HOLD' command should be written to DMA_HOLD_CMD_REG
 1131 * and the GCF bit set in the INTEN register if the user wants a
 1132 * G2D interrupt once the current command list has finished
 1133 * executing.
 1134 * Otherwise only the ACF bit should be set in the INTEN register,
 1135 * so that a single interrupt occurs after all command lists
 1136 * have completed.
1137 */
863 if (node->event) { 1138 if (node->event) {
1139 cmdlist->data[cmdlist->last++] = G2D_INTEN;
1140 cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF;
864 cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD; 1141 cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
865 cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD; 1142 cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
1143 } else {
1144 cmdlist->data[cmdlist->last++] = G2D_INTEN;
1145 cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
866 } 1146 }
867 1147
868 /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */ 1148 /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
@@ -887,7 +1167,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
887 if (ret < 0) 1167 if (ret < 0)
888 goto err_free_event; 1168 goto err_free_event;
889 1169
890 node->map_nr = req->cmd_buf_nr; 1170 node->buf_info.map_nr = req->cmd_buf_nr;
891 if (req->cmd_buf_nr) { 1171 if (req->cmd_buf_nr) {
892 struct drm_exynos_g2d_cmd *cmd_buf; 1172 struct drm_exynos_g2d_cmd *cmd_buf;
893 1173
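Annotation: most of the g2d changes above funnel into g2d_check_buf_desc_is_valid(): a blit is rejected unless the rectangle taken from the command list respects the hardware limits and, multiplied by the bytes-per-pixel of the declared format, fits inside the mapped buffer. A self-contained sketch of that check, taking the bytes-per-pixel directly instead of deriving it from a G2D format code:

    #include <stdbool.h>
    #include <stdio.h>

    #define LEN_MIN 1
    #define LEN_MAX 8000

    struct buf_desc {
        unsigned int bpp;            /* bytes per pixel for the chosen format */
        unsigned int left_x, top_y;
        unsigned int right_x, bottom_y;
    };

    static bool buf_desc_is_valid(const struct buf_desc *d, unsigned long size)
    {
        unsigned int width = d->right_x - d->left_x;
        unsigned int height = d->bottom_y - d->top_y;
        unsigned long area;

        if (width < LEN_MIN || width > LEN_MAX)
            return false;
        if (height < LEN_MIN || height > LEN_MAX)
            return false;

        area = (unsigned long)width * height * d->bpp;
        return area <= size;
    }

    int main(void)
    {
        /* A 100x100 ARGB8888 rectangle needs 40000 bytes. */
        struct buf_desc d = { .bpp = 4, .left_x = 0, .top_y = 0,
                              .right_x = 100, .bottom_y = 100 };

        printf("%d\n", buf_desc_is_valid(&d, 40000)); /* 1: exactly fits */
        printf("%d\n", buf_desc_is_valid(&d, 39999)); /* 0: too small */
        return 0;
    }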
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 67e17ce112b6..0e6fe000578c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -164,6 +164,27 @@ out:
164 exynos_gem_obj = NULL; 164 exynos_gem_obj = NULL;
165} 165}
166 166
167unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
168 unsigned int gem_handle,
169 struct drm_file *file_priv)
170{
171 struct exynos_drm_gem_obj *exynos_gem_obj;
172 struct drm_gem_object *obj;
173
174 obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
175 if (!obj) {
176 DRM_ERROR("failed to lookup gem object.\n");
177 return 0;
178 }
179
180 exynos_gem_obj = to_exynos_gem_obj(obj);
181
182 drm_gem_object_unreference_unlocked(obj);
183
184 return exynos_gem_obj->buffer->size;
185}
186
187
167struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, 188struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
168 unsigned long size) 189 unsigned long size)
169{ 190{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 35ebac47dc2b..468766bee450 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -130,6 +130,11 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
130int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 130int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
131 struct drm_file *file_priv); 131 struct drm_file *file_priv);
132 132
133/* get buffer size to gem handle. */
134unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
135 unsigned int gem_handle,
136 struct drm_file *file_priv);
137
133/* initialize gem object. */ 138/* initialize gem object. */
134int exynos_drm_gem_init_object(struct drm_gem_object *obj); 139int exynos_drm_gem_init_object(struct drm_gem_object *obj);
135 140
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 13ccbd4bcfaa..9504b0cd825a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -117,13 +117,12 @@ static struct edid *vidi_get_edid(struct device *dev,
117 } 117 }
118 118
119 edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; 119 edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
120 edid = kzalloc(edid_len, GFP_KERNEL); 120 edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
121 if (!edid) { 121 if (!edid) {
122 DRM_DEBUG_KMS("failed to allocate edid\n"); 122 DRM_DEBUG_KMS("failed to allocate edid\n");
123 return ERR_PTR(-ENOMEM); 123 return ERR_PTR(-ENOMEM);
124 } 124 }
125 125
126 memcpy(edid, ctx->raw_edid, edid_len);
127 return edid; 126 return edid;
128} 127}
129 128
@@ -563,12 +562,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
563 return -EINVAL; 562 return -EINVAL;
564 } 563 }
565 edid_len = (1 + raw_edid->extensions) * EDID_LENGTH; 564 edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
566 ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL); 565 ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
567 if (!ctx->raw_edid) { 566 if (!ctx->raw_edid) {
568 DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); 567 DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
569 return -ENOMEM; 568 return -ENOMEM;
570 } 569 }
571 memcpy(ctx->raw_edid, raw_edid, edid_len);
572 } else { 570 } else {
573 /* 571 /*
574 * with connection = 0, free raw_edid 572 * with connection = 0, free raw_edid
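Annotation: both vidi hunks collapse a kzalloc()-plus-memcpy() pair into kmemdup(), which allocates and copies in one step and cannot forget the copy. A user-space stand-in (memdup() here is a made-up helper, not a libc function):

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    /* User-space analogue of kmemdup(): allocate len bytes and copy src. */
    static void *memdup(const void *src, size_t len)
    {
        void *p = malloc(len);

        if (p)
            memcpy(p, src, len);
        return p;
    }

    int main(void)
    {
        const unsigned char edid_stub[8] = { 0x00, 0xff, 0xff, 0xff,
                                             0xff, 0xff, 0xff, 0x00 };
        unsigned char *copy = memdup(edid_stub, sizeof(edid_stub));

        if (!copy)
            return 1;
        printf("second byte of copy: 0x%02x\n", copy[1]); /* 0xff */
        free(copy);
        return 0;
    }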
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e919aba29b3d..2f4f72f07047 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -818,7 +818,7 @@ static void mixer_win_disable(void *ctx, int win)
818 mixer_ctx->win_data[win].enabled = false; 818 mixer_ctx->win_data[win].enabled = false;
819} 819}
820 820
821int mixer_check_timing(void *ctx, struct fb_videomode *timing) 821static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
822{ 822{
823 struct mixer_context *mixer_ctx = ctx; 823 struct mixer_context *mixer_ctx = ctx;
824 u32 w, h; 824 u32 w, h;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0a8eceb75902..e9b57893db2b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -125,6 +125,11 @@ MODULE_PARM_DESC(preliminary_hw_support,
125 "Enable Haswell and ValleyView Support. " 125 "Enable Haswell and ValleyView Support. "
126 "(default: false)"); 126 "(default: false)");
127 127
128int i915_disable_power_well __read_mostly = 0;
129module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
130MODULE_PARM_DESC(disable_power_well,
131 "Disable the power well when possible (default: false)");
132
128static struct drm_driver driver; 133static struct drm_driver driver;
129extern int intel_agp_enabled; 134extern int intel_agp_enabled;
130 135
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e95337c97459..01769e2a9953 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1398,6 +1398,7 @@ extern int i915_enable_fbc __read_mostly;
1398extern bool i915_enable_hangcheck __read_mostly; 1398extern bool i915_enable_hangcheck __read_mostly;
1399extern int i915_enable_ppgtt __read_mostly; 1399extern int i915_enable_ppgtt __read_mostly;
1400extern unsigned int i915_preliminary_hw_support __read_mostly; 1400extern unsigned int i915_preliminary_hw_support __read_mostly;
1401extern int i915_disable_power_well __read_mostly;
1401 1402
1402extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1403extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1403extern int i915_resume(struct drm_device *dev); 1404extern int i915_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3b11ab0fbc96..9a48e1a2d417 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -57,7 +57,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
57 if (eb == NULL) { 57 if (eb == NULL) {
58 int size = args->buffer_count; 58 int size = args->buffer_count;
59 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; 59 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
60 BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); 60 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
61 while (count > 2*size) 61 while (count > 2*size)
62 count >>= 1; 62 count >>= 1;
63 eb = kzalloc(count*sizeof(struct hlist_head) + 63 eb = kzalloc(count*sizeof(struct hlist_head) +
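Annotation: the execbuffer change swaps an open-coded BUILD_BUG_ON(!is_power_of_2(...)) for the kernel's BUILD_BUG_ON_NOT_POWER_OF_2(). The underlying test is that n is a power of two exactly when n is non-zero and (n & (n - 1)) is zero, and it can be evaluated at compile time. A sketch using C11 _Static_assert; ASSERT_POWER_OF_2 is an invented name, and the bucket arithmetic mirrors PAGE_SIZE / sizeof(struct hlist_head) above assuming a 4 KiB page and 8-byte pointers:

    #include <stdio.h>

    /* n is a power of two iff n != 0 and (n & (n - 1)) == 0. */
    #define ASSERT_POWER_OF_2(n) \
        _Static_assert((n) != 0 && ((n) & ((n) - 1)) == 0, #n " is not a power of 2")

    struct bucket { struct bucket *next; };   /* stand-in for hlist_head */

    /* 4096 / 8 = 512 buckets on an LP64 target: a power of two,
     * so this compiles; a non-power-of-two count would not. */
    ASSERT_POWER_OF_2(4096 / sizeof(struct bucket *));

    int main(void)
    {
        printf("buckets: %zu\n", 4096 / sizeof(struct bucket *));
        return 0;
    }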
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 32a3693905ec..1ce45a0a2d3e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -45,6 +45,9 @@
45 45
46struct intel_crt { 46struct intel_crt {
47 struct intel_encoder base; 47 struct intel_encoder base;
48 /* DPMS state is stored in the connector, which we need in the
49 * encoder's enable/disable callbacks */
50 struct intel_connector *connector;
48 bool force_hotplug_required; 51 bool force_hotplug_required;
49 u32 adpa_reg; 52 u32 adpa_reg;
50}; 53};
@@ -81,29 +84,6 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
81 return true; 84 return true;
82} 85}
83 86
84static void intel_disable_crt(struct intel_encoder *encoder)
85{
86 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
87 struct intel_crt *crt = intel_encoder_to_crt(encoder);
88 u32 temp;
89
90 temp = I915_READ(crt->adpa_reg);
91 temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
92 temp &= ~ADPA_DAC_ENABLE;
93 I915_WRITE(crt->adpa_reg, temp);
94}
95
96static void intel_enable_crt(struct intel_encoder *encoder)
97{
98 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
99 struct intel_crt *crt = intel_encoder_to_crt(encoder);
100 u32 temp;
101
102 temp = I915_READ(crt->adpa_reg);
103 temp |= ADPA_DAC_ENABLE;
104 I915_WRITE(crt->adpa_reg, temp);
105}
106
107/* Note: The caller is required to filter out dpms modes not supported by the 87/* Note: The caller is required to filter out dpms modes not supported by the
108 * platform. */ 88 * platform. */
109static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) 89static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -135,6 +115,19 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
135 I915_WRITE(crt->adpa_reg, temp); 115 I915_WRITE(crt->adpa_reg, temp);
136} 116}
137 117
118static void intel_disable_crt(struct intel_encoder *encoder)
119{
120 intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
121}
122
123static void intel_enable_crt(struct intel_encoder *encoder)
124{
125 struct intel_crt *crt = intel_encoder_to_crt(encoder);
126
127 intel_crt_set_dpms(encoder, crt->connector->base.dpms);
128}
129
130
138static void intel_crt_dpms(struct drm_connector *connector, int mode) 131static void intel_crt_dpms(struct drm_connector *connector, int mode)
139{ 132{
140 struct drm_device *dev = connector->dev; 133 struct drm_device *dev = connector->dev;
@@ -746,6 +739,7 @@ void intel_crt_init(struct drm_device *dev)
746 } 739 }
747 740
748 connector = &intel_connector->base; 741 connector = &intel_connector->base;
742 crt->connector = intel_connector;
749 drm_connector_init(dev, &intel_connector->base, 743 drm_connector_init(dev, &intel_connector->base,
750 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 744 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
751 745
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 287b42c9d1a8..b20d50192fcc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5771,6 +5771,11 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5771 num_connectors++; 5771 num_connectors++;
5772 } 5772 }
5773 5773
5774 if (is_cpu_edp)
5775 intel_crtc->cpu_transcoder = TRANSCODER_EDP;
5776 else
5777 intel_crtc->cpu_transcoder = pipe;
5778
5774 /* We are not sure yet this won't happen. */ 5779 /* We are not sure yet this won't happen. */
5775 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", 5780 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
5776 INTEL_PCH_TYPE(dev)); 5781 INTEL_PCH_TYPE(dev));
@@ -5837,11 +5842,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5837 int pipe = intel_crtc->pipe; 5842 int pipe = intel_crtc->pipe;
5838 int ret; 5843 int ret;
5839 5844
5840 if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
5841 intel_crtc->cpu_transcoder = TRANSCODER_EDP;
5842 else
5843 intel_crtc->cpu_transcoder = pipe;
5844
5845 drm_vblank_pre_modeset(dev, pipe); 5845 drm_vblank_pre_modeset(dev, pipe);
5846 5846
5847 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, 5847 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d7d4afe01341..8fc93f90a7cd 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2559,12 +2559,15 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2559{ 2559{
2560 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 2560 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
2561 struct intel_dp *intel_dp = &intel_dig_port->dp; 2561 struct intel_dp *intel_dp = &intel_dig_port->dp;
2562 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2562 2563
2563 i2c_del_adapter(&intel_dp->adapter); 2564 i2c_del_adapter(&intel_dp->adapter);
2564 drm_encoder_cleanup(encoder); 2565 drm_encoder_cleanup(encoder);
2565 if (is_edp(intel_dp)) { 2566 if (is_edp(intel_dp)) {
2566 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 2567 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
2568 mutex_lock(&dev->mode_config.mutex);
2567 ironlake_panel_vdd_off_sync(intel_dp); 2569 ironlake_panel_vdd_off_sync(intel_dp);
2570 mutex_unlock(&dev->mode_config.mutex);
2568 } 2571 }
2569 kfree(intel_dig_port); 2572 kfree(intel_dig_port);
2570} 2573}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a3730e0289e5..bee8cb6108a7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -321,9 +321,6 @@ void intel_panel_enable_backlight(struct drm_device *dev,
321 if (dev_priv->backlight_level == 0) 321 if (dev_priv->backlight_level == 0)
322 dev_priv->backlight_level = intel_panel_get_max_backlight(dev); 322 dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
323 323
324 dev_priv->backlight_enabled = true;
325 intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
326
327 if (INTEL_INFO(dev)->gen >= 4) { 324 if (INTEL_INFO(dev)->gen >= 4) {
328 uint32_t reg, tmp; 325 uint32_t reg, tmp;
329 326
@@ -359,12 +356,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
359 } 356 }
360 357
361set_level: 358set_level:
362 /* Check the current backlight level and try to set again if it's zero. 359 /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
363 * On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically 360 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
364 * when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written. 361 * registers are set.
365 */ 362 */
366 if (!intel_panel_get_backlight(dev)) 363 dev_priv->backlight_enabled = true;
367 intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); 364 intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
368} 365}
369 366
370static void intel_panel_init_backlight(struct drm_device *dev) 367static void intel_panel_init_backlight(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a1794c6df1bf..adca00783e61 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4079,6 +4079,9 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
4079 if (!IS_HASWELL(dev)) 4079 if (!IS_HASWELL(dev))
4080 return; 4080 return;
4081 4081
4082 if (!i915_disable_power_well && !enable)
4083 return;
4084
4082 tmp = I915_READ(HSW_PWR_WELL_DRIVER); 4085 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
4083 is_enabled = tmp & HSW_PWR_WELL_STATE; 4086 is_enabled = tmp & HSW_PWR_WELL_STATE;
4084 enable_requested = tmp & HSW_PWR_WELL_ENABLE; 4087 enable_requested = tmp & HSW_PWR_WELL_ENABLE;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index fe22bb780e1d..78d8e919509f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -751,8 +751,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
751 int i; 751 int i;
752 unsigned char misc = 0; 752 unsigned char misc = 0;
753 unsigned char ext_vga[6]; 753 unsigned char ext_vga[6];
754 unsigned char ext_vga_index24;
755 unsigned char dac_index90 = 0;
756 u8 bppshift; 754 u8 bppshift;
757 755
758 static unsigned char dacvalue[] = { 756 static unsigned char dacvalue[] = {
@@ -803,7 +801,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
803 option2 = 0x0000b000; 801 option2 = 0x0000b000;
804 break; 802 break;
805 case G200_ER: 803 case G200_ER:
806 dac_index90 = 0;
807 break; 804 break;
808 } 805 }
809 806
@@ -852,10 +849,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
852 WREG_DAC(i, dacvalue[i]); 849 WREG_DAC(i, dacvalue[i]);
853 } 850 }
854 851
855 if (mdev->type == G200_ER) { 852 if (mdev->type == G200_ER)
856 WREG_DAC(0x90, dac_index90); 853 WREG_DAC(0x90, 0);
857 }
858
859 854
860 if (option) 855 if (option)
861 pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); 856 pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
@@ -952,8 +947,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
952 if (mdev->type == G200_WB) 947 if (mdev->type == G200_WB)
953 ext_vga[1] |= 0x88; 948 ext_vga[1] |= 0x88;
954 949
955 ext_vga_index24 = 0x05;
956
957 /* Set pixel clocks */ 950 /* Set pixel clocks */
958 misc = 0x2d; 951 misc = 0x2d;
959 WREG8(MGA_MISC_OUT, misc); 952 WREG8(MGA_MISC_OUT, misc);
@@ -965,7 +958,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
965 } 958 }
966 959
967 if (mdev->type == G200_ER) 960 if (mdev->type == G200_ER)
968 WREG_ECRT(24, ext_vga_index24); 961 WREG_ECRT(0x24, 0x5);
969 962
970 if (mdev->type == G200_EV) { 963 if (mdev->type == G200_EV) {
971 WREG_ECRT(6, 0); 964 WREG_ECRT(6, 0);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index e816f06637a7..0e2c1a4f1659 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -248,6 +248,22 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios)
248 } 248 }
249} 249}
250 250
251static void
252nouveau_bios_shadow_platform(struct nouveau_bios *bios)
253{
254 struct pci_dev *pdev = nv_device(bios)->pdev;
255 size_t size;
256
257 void __iomem *rom = pci_platform_rom(pdev, &size);
258 if (rom && size) {
259 bios->data = kmalloc(size, GFP_KERNEL);
260 if (bios->data) {
261 memcpy_fromio(bios->data, rom, size);
262 bios->size = size;
263 }
264 }
265}
266
251static int 267static int
252nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) 268nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
253{ 269{
@@ -288,6 +304,7 @@ nouveau_bios_shadow(struct nouveau_bios *bios)
288 { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL }, 304 { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL },
289 { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL }, 305 { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL },
290 { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL }, 306 { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL },
307 { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL },
291 {} 308 {}
292 }; 309 };
293 struct methods *mthd, *best; 310 struct methods *mthd, *best;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 3b6dc883e150..5eb3e0da7c6e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -391,7 +391,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
391 struct nouveau_drm *drm = nouveau_drm(dev); 391 struct nouveau_drm *drm = nouveau_drm(dev);
392 struct nouveau_device *device = nv_device(drm->device); 392 struct nouveau_device *device = nv_device(drm->device);
393 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 393 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
394 struct nouveau_abi16_chan *chan, *temp; 394 struct nouveau_abi16_chan *chan = NULL, *temp;
395 struct nouveau_abi16_ntfy *ntfy; 395 struct nouveau_abi16_ntfy *ntfy;
396 struct nouveau_object *object; 396 struct nouveau_object *object;
397 struct nv_dma_class args = {}; 397 struct nv_dma_class args = {};
@@ -404,10 +404,11 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
404 if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) 404 if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
405 return nouveau_abi16_put(abi16, -EINVAL); 405 return nouveau_abi16_put(abi16, -EINVAL);
406 406
407 list_for_each_entry_safe(chan, temp, &abi16->channels, head) { 407 list_for_each_entry(temp, &abi16->channels, head) {
408 if (chan->chan->handle == (NVDRM_CHAN | info->channel)) 408 if (temp->chan->handle == (NVDRM_CHAN | info->channel)) {
409 chan = temp;
409 break; 410 break;
410 chan = NULL; 411 }
411 } 412 }
412 413
413 if (!chan) 414 if (!chan)
@@ -459,17 +460,18 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
459{ 460{
460 struct drm_nouveau_gpuobj_free *fini = data; 461 struct drm_nouveau_gpuobj_free *fini = data;
461 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 462 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
462 struct nouveau_abi16_chan *chan, *temp; 463 struct nouveau_abi16_chan *chan = NULL, *temp;
463 struct nouveau_abi16_ntfy *ntfy; 464 struct nouveau_abi16_ntfy *ntfy;
464 int ret; 465 int ret;
465 466
466 if (unlikely(!abi16)) 467 if (unlikely(!abi16))
467 return -ENOMEM; 468 return -ENOMEM;
468 469
469 list_for_each_entry_safe(chan, temp, &abi16->channels, head) { 470 list_for_each_entry(temp, &abi16->channels, head) {
470 if (chan->chan->handle == (NVDRM_CHAN | fini->channel)) 471 if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) {
472 chan = temp;
471 break; 473 break;
472 chan = NULL; 474 }
473 } 475 }
474 476
475 if (!chan) 477 if (!chan)
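Annotation: both abi16 fixes have the same shape. The old code used the list iterator itself as the result, but after a complete walk a list_for_each_entry(_safe) cursor points at the container of the list head, never NULL, so a miss could be mistaken for a hit and a bogus pointer dereferenced. The fix iterates with a separate cursor and assigns the result only on a match. A sketch of that pattern on a plain NULL-terminated singly linked list:

    #include <stdio.h>
    #include <stddef.h>

    struct chan {
        unsigned int handle;
        struct chan *next;
    };

    /* Walk with a cursor and assign the result only on a match,
     * so a miss leaves 'found' NULL. */
    static struct chan *find_chan(struct chan *head, unsigned int handle)
    {
        struct chan *found = NULL;

        for (struct chan *cur = head; cur; cur = cur->next) {
            if (cur->handle == handle) {
                found = cur;
                break;
            }
        }
        return found;
    }

    int main(void)
    {
        struct chan c2 = { .handle = 2, .next = NULL };
        struct chan c1 = { .handle = 1, .next = &c2 };

        printf("handle 2: %s\n", find_chan(&c1, 2) ? "found" : "missing");
        printf("handle 7: %s\n", find_chan(&c1, 7) ? "found" : "missing");
        return 0;
    }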
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index d1099365bfc1..c95decf543e9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -72,11 +72,25 @@ module_param_named(modeset, nouveau_modeset, int, 0400);
72static struct drm_driver driver; 72static struct drm_driver driver;
73 73
74static int 74static int
75nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
76{
77 struct nouveau_drm *drm =
78 container_of(event, struct nouveau_drm, vblank[head]);
79 drm_handle_vblank(drm->dev, head);
80 return NVKM_EVENT_KEEP;
81}
82
83static int
75nouveau_drm_vblank_enable(struct drm_device *dev, int head) 84nouveau_drm_vblank_enable(struct drm_device *dev, int head)
76{ 85{
77 struct nouveau_drm *drm = nouveau_drm(dev); 86 struct nouveau_drm *drm = nouveau_drm(dev);
78 struct nouveau_disp *pdisp = nouveau_disp(drm->device); 87 struct nouveau_disp *pdisp = nouveau_disp(drm->device);
79 nouveau_event_get(pdisp->vblank, head, &drm->vblank); 88
89 if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank)))
90 return -EIO;
91 WARN_ON_ONCE(drm->vblank[head].func);
92 drm->vblank[head].func = nouveau_drm_vblank_handler;
93 nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]);
80 return 0; 94 return 0;
81} 95}
82 96
@@ -85,16 +99,11 @@ nouveau_drm_vblank_disable(struct drm_device *dev, int head)
85{ 99{
86 struct nouveau_drm *drm = nouveau_drm(dev); 100 struct nouveau_drm *drm = nouveau_drm(dev);
87 struct nouveau_disp *pdisp = nouveau_disp(drm->device); 101 struct nouveau_disp *pdisp = nouveau_disp(drm->device);
88 nouveau_event_put(pdisp->vblank, head, &drm->vblank); 102 if (drm->vblank[head].func)
89} 103 nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]);
90 104 else
91static int 105 WARN_ON_ONCE(1);
92nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) 106 drm->vblank[head].func = NULL;
93{
94 struct nouveau_drm *drm =
95 container_of(event, struct nouveau_drm, vblank);
96 drm_handle_vblank(drm->dev, head);
97 return NVKM_EVENT_KEEP;
98} 107}
99 108
100static u64 109static u64
@@ -292,7 +301,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
292 301
293 dev->dev_private = drm; 302 dev->dev_private = drm;
294 drm->dev = dev; 303 drm->dev = dev;
295 drm->vblank.func = nouveau_drm_vblank_handler;
296 304
297 INIT_LIST_HEAD(&drm->clients); 305 INIT_LIST_HEAD(&drm->clients);
298 spin_lock_init(&drm->tile.lock); 306 spin_lock_init(&drm->tile.lock);
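Annotation: the per-head handler above recovers its nouveau_drm from the event pointer with container_of(event, struct nouveau_drm, vblank[head]). Because the member designator names the exact array element the pointer refers to, subtracting that element's offset lands back at the start of the structure. A user-space sketch with container_of spelled out via offsetof (a non-constant array index inside offsetof is a GCC/Clang extension, which the kernel's container_of relies on here as well):

    #include <stdio.h>
    #include <stddef.h>

    /* Same shape as the kernel macro, minus type checking. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct event { int (*func)(struct event *ev, int head); };

    struct drm {
        const char *name;
        struct event vblank[4];
    };

    static int handler(struct event *ev, int head)
    {
        /* Recover the enclosing drm from the address of vblank[head];
         * offsetof with a runtime index is a GCC/Clang extension. */
        struct drm *drm = container_of(ev, struct drm, vblank[head]);

        printf("vblank on head %d of %s\n", head, drm->name);
        return 0;
    }

    int main(void)
    {
        struct drm drm = { .name = "card0" };

        for (int head = 0; head < 4; head++) {
            drm.vblank[head].func = handler;
            drm.vblank[head].func(&drm.vblank[head], head);
        }
        return 0;
    }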
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index b25df374c901..9c39bafbef2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -113,7 +113,7 @@ struct nouveau_drm {
113 struct nvbios vbios; 113 struct nvbios vbios;
114 struct nouveau_display *display; 114 struct nouveau_display *display;
115 struct backlight_device *backlight; 115 struct backlight_device *backlight;
116 struct nouveau_eventh vblank; 116 struct nouveau_eventh vblank[4];
117 117
118 /* power management */ 118 /* power management */
119 struct nouveau_pm *pm; 119 struct nouveau_pm *pm;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7f0e6c3f37d1..1ddc03e51bf4 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data)
479{ 479{
480 struct nv50_display_flip *flip = data; 480 struct nv50_display_flip *flip = data;
481 if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) == 481 if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
482 flip->chan->data); 482 flip->chan->data)
483 return true; 483 return true;
484 usleep_range(1, 2); 484 usleep_range(1, 2);
485 return false; 485 return false;
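Annotation: the nv50 one-liner removes a stray semicolon that terminated the if immediately, making the following return true unconditional, so the flip-wait loop reported every flip as complete. A two-function illustration of the hazard:

    #include <stdio.h>
    #include <stdbool.h>

    /* The stray ';' ends the if, so the return below is unconditional. */
    static bool done_buggy(int value, int expected)
    {
        if (value == expected);
            return true;
    }

    static bool done_fixed(int value, int expected)
    {
        if (value == expected)
            return true;
        return false;
    }

    int main(void)
    {
        printf("buggy: %d\n", done_buggy(1, 2));  /* 1, even though 1 != 2 */
        printf("fixed: %d\n", done_fixed(1, 2));  /* 0 */
        return 0;
    }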
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index b8015913d382..fa3c56fba294 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -99,6 +99,29 @@ static bool radeon_read_bios(struct radeon_device *rdev)
99 return true; 99 return true;
100} 100}
101 101
102static bool radeon_read_platform_bios(struct radeon_device *rdev)
103{
104 uint8_t __iomem *bios;
105 size_t size;
106
107 rdev->bios = NULL;
108
109 bios = pci_platform_rom(rdev->pdev, &size);
110 if (!bios) {
111 return false;
112 }
113
114 if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
115 return false;
116 }
117 rdev->bios = kmemdup(bios, size, GFP_KERNEL);
118 if (rdev->bios == NULL) {
119 return false;
120 }
121
122 return true;
123}
124
102#ifdef CONFIG_ACPI 125#ifdef CONFIG_ACPI
103/* ATRM is used to get the BIOS on the discrete cards in 126/* ATRM is used to get the BIOS on the discrete cards in
104 * dual-gpu systems. 127 * dual-gpu systems.
@@ -620,6 +643,9 @@ bool radeon_get_bios(struct radeon_device *rdev)
620 if (r == false) { 643 if (r == false) {
621 r = radeon_read_disabled_bios(rdev); 644 r = radeon_read_disabled_bios(rdev);
622 } 645 }
646 if (r == false) {
647 r = radeon_read_platform_bios(rdev);
648 }
623 if (r == false || rdev->bios == NULL) { 649 if (r == false || rdev->bios == NULL) {
624 DRM_ERROR("Unable to locate a BIOS ROM\n"); 650 DRM_ERROR("Unable to locate a BIOS ROM\n");
625 rdev->bios = NULL; 651 rdev->bios = NULL;
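radeon_read_platform_bios() above only trusts the ROM returned by pci_platform_rom() when it begins with the PCI expansion-ROM signature bytes 0x55 0xAA. As a hedged illustration of what that check means, here is a small host-side program (file handling and names invented for the demo) applying the same two-byte test to a ROM image:

#include <stdio.h>

/* Returns 1 if the buffer starts with the PCI option-ROM signature. */
static int rom_signature_ok(const unsigned char *rom, size_t size)
{
	return size >= 2 && rom[0] == 0x55 && rom[1] == 0xaa;
}

int main(int argc, char **argv)
{
	unsigned char header[2];
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <rom-image>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fread(header, 1, sizeof(header), f) != sizeof(header) ||
	    !rom_signature_ok(header, sizeof(header))) {
		fprintf(stderr, "not a PCI option ROM\n");
		fclose(f);
		return 1;
	}
	printf("valid option ROM signature\n");
	fclose(f);
	return 0;
}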
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index fe5cdbcf2636..b44d548c56f8 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -61,6 +61,10 @@ static int udl_get_modes(struct drm_connector *connector)
61 int ret; 61 int ret;
62 62
63 edid = (struct edid *)udl_get_edid(udl); 63 edid = (struct edid *)udl_get_edid(udl);
64 if (!edid) {
65 drm_mode_connector_update_edid_property(connector, NULL);
66 return 0;
67 }
64 68
65 /* 69 /*
66 * We only read the main block, but if the monitor reports extension 70 * We only read the main block, but if the monitor reports extension
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 512b01c04ea7..aa341d135867 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2077,7 +2077,6 @@ static const struct hid_device_id hid_ignore_list[] = {
2077 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) }, 2077 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
2078 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) }, 2078 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
2079 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, 2079 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
2080 { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) },
2081 { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) }, 2080 { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
2082 { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, 2081 { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
2083 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, 2082 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2244,6 +2243,18 @@ bool hid_ignore(struct hid_device *hdev)
2244 hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST)) 2243 hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
2245 return true; 2244 return true;
2246 break; 2245 break;
2246 case USB_VENDOR_ID_ATMEL_V_USB:
 2247 /* The Masterkit MA901 usb radio is based on an Atmel tiny85 chip
 2248 * and has the same USB ID as many Atmel V-USB devices. This
 2249 * usb radio is handled by the radio-ma901.c driver, so we want
 2250 * to ignore the hid device. Check the name, bus and product, and
 2251 * ignore it if we have the MA901 usb radio.
2252 */
2253 if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
2254 hdev->bus == BUS_USB &&
2255 strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
2256 return true;
2257 break;
2247 } 2258 }
2248 2259
2249 if (hdev->type == HID_TYPE_USBMOUSE && 2260 if (hdev->type == HID_TYPE_USBMOUSE &&
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 92e47e5c9564..5309fd5eb0eb 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -158,6 +158,8 @@
158#define USB_VENDOR_ID_ATMEL 0x03eb 158#define USB_VENDOR_ID_ATMEL 0x03eb
159#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c 159#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
160#define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118 160#define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118
161#define USB_VENDOR_ID_ATMEL_V_USB 0x16c0
162#define USB_DEVICE_ID_ATMEL_V_USB 0x05df
161 163
162#define USB_VENDOR_ID_AUREAL 0x0755 164#define USB_VENDOR_ID_AUREAL 0x0755
163#define USB_DEVICE_ID_AUREAL_W01RN 0x2626 165#define USB_DEVICE_ID_AUREAL_W01RN 0x2626
@@ -557,9 +559,6 @@
557#define USB_VENDOR_ID_MADCATZ 0x0738 559#define USB_VENDOR_ID_MADCATZ 0x0738
558#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 560#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
559 561
560#define USB_VENDOR_ID_MASTERKIT 0x16c0
561#define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df
562
563#define USB_VENDOR_ID_MCC 0x09db 562#define USB_VENDOR_ID_MCC 0x09db
564#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 563#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
565#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a 564#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
@@ -590,6 +589,9 @@
590#define USB_VENDOR_ID_MONTEREY 0x0566 589#define USB_VENDOR_ID_MONTEREY 0x0566
591#define USB_DEVICE_ID_GENIUS_KB29E 0x3004 590#define USB_DEVICE_ID_GENIUS_KB29E 0x3004
592 591
592#define USB_VENDOR_ID_MSI 0x1770
593#define USB_DEVICE_ID_MSI_GX680R_LED_PANEL 0xff00
594
593#define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400 595#define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400
594#define USB_DEVICE_ID_N_S_HARMONY 0xc359 596#define USB_DEVICE_ID_N_S_HARMONY 0xc359
595 597
@@ -684,6 +686,9 @@
684#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001 686#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
685#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008 687#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
686 688
689#define USB_VENDOR_ID_REALTEK 0x0bda
690#define USB_DEVICE_ID_REALTEK_READER 0x0152
691
687#define USB_VENDOR_ID_ROCCAT 0x1e7d 692#define USB_VENDOR_ID_ROCCAT 0x1e7d
688#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4 693#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
689#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c 694#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index f7f113ba083e..a8ce44296cfd 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -462,6 +462,21 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
462 return 0; 462 return 0;
463} 463}
464 464
465static void magicmouse_input_configured(struct hid_device *hdev,
466 struct hid_input *hi)
467
468{
469 struct magicmouse_sc *msc = hid_get_drvdata(hdev);
470
471 int ret = magicmouse_setup_input(msc->input, hdev);
472 if (ret) {
473 hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
474 /* clean msc->input to notify probe() of the failure */
475 msc->input = NULL;
476 }
477}
478
479
465static int magicmouse_probe(struct hid_device *hdev, 480static int magicmouse_probe(struct hid_device *hdev,
466 const struct hid_device_id *id) 481 const struct hid_device_id *id)
467{ 482{
@@ -493,15 +508,10 @@ static int magicmouse_probe(struct hid_device *hdev,
493 goto err_free; 508 goto err_free;
494 } 509 }
495 510
496 /* We do this after hid-input is done parsing reports so that 511 if (!msc->input) {
497 * hid-input uses the most natural button and axis IDs. 512 hid_err(hdev, "magicmouse input not registered\n");
498 */ 513 ret = -ENOMEM;
499 if (msc->input) { 514 goto err_stop_hw;
500 ret = magicmouse_setup_input(msc->input, hdev);
501 if (ret) {
502 hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
503 goto err_stop_hw;
504 }
505 } 515 }
506 516
507 if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE) 517 if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
@@ -568,6 +578,7 @@ static struct hid_driver magicmouse_driver = {
568 .remove = magicmouse_remove, 578 .remove = magicmouse_remove,
569 .raw_event = magicmouse_raw_event, 579 .raw_event = magicmouse_raw_event,
570 .input_mapping = magicmouse_input_mapping, 580 .input_mapping = magicmouse_input_mapping,
581 .input_configured = magicmouse_input_configured,
571}; 582};
572module_hid_driver(magicmouse_driver); 583module_hid_driver(magicmouse_driver);
573 584
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 7a1ebb867cf4..82e9211b3ca9 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -621,6 +621,7 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
621{ 621{
622 struct mt_device *td = hid_get_drvdata(hid); 622 struct mt_device *td = hid_get_drvdata(hid);
623 __s32 quirks = td->mtclass.quirks; 623 __s32 quirks = td->mtclass.quirks;
624 struct input_dev *input = field->hidinput->input;
624 625
625 if (hid->claimed & HID_CLAIMED_INPUT) { 626 if (hid->claimed & HID_CLAIMED_INPUT) {
626 switch (usage->hid) { 627 switch (usage->hid) {
@@ -670,13 +671,16 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
670 break; 671 break;
671 672
672 default: 673 default:
674 if (usage->type)
675 input_event(input, usage->type, usage->code,
676 value);
673 return; 677 return;
674 } 678 }
675 679
676 if (usage->usage_index + 1 == field->report_count) { 680 if (usage->usage_index + 1 == field->report_count) {
677 /* we only take into account the last report. */ 681 /* we only take into account the last report. */
678 if (usage->hid == td->last_slot_field) 682 if (usage->hid == td->last_slot_field)
679 mt_complete_slot(td, field->hidinput->input); 683 mt_complete_slot(td, input);
680 684
681 if (field->index == td->last_field_index 685 if (field->index == td->last_field_index
682 && td->num_received >= td->num_expected) 686 && td->num_received >= td->num_expected)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index e0e6abf1cd3b..19b8360f2330 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -73,6 +73,7 @@ static const struct hid_blacklist {
73 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 73 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
74 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 74 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
75 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 75 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
76 { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
76 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, 77 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
77 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, 78 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
78 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, 79 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
@@ -80,6 +81,7 @@ static const struct hid_blacklist {
80 { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET }, 81 { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
81 { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET }, 82 { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
82 { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET }, 83 { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
84 { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
83 { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET }, 85 { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
84 { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET }, 86 { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
85 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, 87 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index db713c0dfba4..461a0d739d75 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -416,6 +416,8 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock)
416 ret = pm_runtime_get_sync(dev); 416 ret = pm_runtime_get_sync(dev);
417 if (ret < 0) { 417 if (ret < 0) {
418 dev_err(dev, "%s: can't power on device\n", __func__); 418 dev_err(dev, "%s: can't power on device\n", __func__);
419 pm_runtime_put_noidle(dev);
420 module_put(dev->driver->owner);
419 return ret; 421 return ret;
420 } 422 }
421 423
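The hwspinlock fix above addresses a common runtime-PM pitfall: pm_runtime_get_sync() raises the device usage count even when it returns an error, so a failing caller has to drop that count with pm_runtime_put_noidle() and undo the module reference it took earlier. A kernel-style sketch of the idiom, with a hypothetical foo_request() standing in for __hwspin_lock_request():

#include <linux/device.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

static int foo_request(struct device *dev)
{
	int ret;

	if (!try_module_get(dev->driver->owner))
		return -EINVAL;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		/* get_sync() took a usage count even though it failed */
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	return 0;
}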
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0ceb6e1b0f65..e3085c487ace 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -182,7 +182,6 @@ static int dw_i2c_probe(struct platform_device *pdev)
182 adap->algo = &i2c_dw_algo; 182 adap->algo = &i2c_dw_algo;
183 adap->dev.parent = &pdev->dev; 183 adap->dev.parent = &pdev->dev;
184 adap->dev.of_node = pdev->dev.of_node; 184 adap->dev.of_node = pdev->dev.of_node;
185 ACPI_HANDLE_SET(&adap->dev, ACPI_HANDLE(&pdev->dev));
186 185
187 r = i2c_add_numbered_adapter(adap); 186 r = i2c_add_numbered_adapter(adap);
188 if (r) { 187 if (r) {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 17ba4f8bc12d..70b1808a08f4 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -186,8 +186,10 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
186 wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), 186 wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
187 wq->rq.memsize, &(wq->rq.dma_addr), 187 wq->rq.memsize, &(wq->rq.dma_addr),
188 GFP_KERNEL); 188 GFP_KERNEL);
189 if (!wq->rq.queue) 189 if (!wq->rq.queue) {
190 ret = -ENOMEM;
190 goto free_sq; 191 goto free_sq;
192 }
191 PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n", 193 PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
192 __func__, wq->sq.queue, 194 __func__, wq->sq.queue,
193 (unsigned long long)virt_to_phys(wq->sq.queue), 195 (unsigned long long)virt_to_phys(wq->sq.queue),
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 439c35d4a669..ea93870266eb 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -620,7 +620,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
620 goto bail; 620 goto bail;
621 } 621 }
622 622
623 opcode = be32_to_cpu(ohdr->bth[0]) >> 24; 623 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
624 dev->opstats[opcode].n_bytes += tlen; 624 dev->opstats[opcode].n_bytes += tlen;
625 dev->opstats[opcode].n_packets++; 625 dev->opstats[opcode].n_packets++;
626 626
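The ipath change above masks the opcode before using it as a table index: bth[0] >> 24 produces a full 8-bit value, while the per-opcode stats table holds only 128 entries, so an opcode with the top bit set would otherwise count packets past the end of the array. A tiny runnable demonstration (the table size mirrors the 0x7f mask; the rest is invented):

#include <stdint.h>
#include <stdio.h>

#define OPCODE_TABLE_SIZE 128	/* matches the 0x7f mask */

static unsigned long n_packets[OPCODE_TABLE_SIZE];

int main(void)
{
	uint32_t bth0 = 0xC1000000;			/* opcode byte 0xc1 */
	unsigned int raw = bth0 >> 24;			/* 0..255 */
	unsigned int opcode = (bth0 >> 24) & 0x7f;	/* 0..127 */

	printf("raw index %u would overrun, masked index %u is safe\n",
	       raw, opcode);
	n_packets[opcode]++;	/* always within the 128-entry table */
	return 0;
}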
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
index 8349f9c5064c..1e603a375069 100644
--- a/drivers/infiniband/hw/qib/Kconfig
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -1,7 +1,7 @@
1config INFINIBAND_QIB 1config INFINIBAND_QIB
2 tristate "QLogic PCIe HCA support" 2 tristate "Intel PCIe HCA support"
3 depends on 64BIT 3 depends on 64BIT
4 ---help--- 4 ---help---
5 This is a low-level driver for QLogic PCIe QLE InfiniBand host 5 This is a low-level driver for Intel PCIe QLE InfiniBand host
6 channel adapters. This driver does not support the QLogic 6 channel adapters. This driver does not support the Intel
7 HyperTransport card (model QHT7140). 7 HyperTransport card (model QHT7140).
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 5423edcab51f..216092477dfc 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013 Intel Corporation. All rights reserved.
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. 3 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 5 *
@@ -63,8 +64,8 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
63 "Attempt pre-IBTA 1.2 DDR speed negotiation"); 64 "Attempt pre-IBTA 1.2 DDR speed negotiation");
64 65
65MODULE_LICENSE("Dual BSD/GPL"); 66MODULE_LICENSE("Dual BSD/GPL");
66MODULE_AUTHOR("QLogic <support@qlogic.com>"); 67MODULE_AUTHOR("Intel <ibsupport@intel.com>");
67MODULE_DESCRIPTION("QLogic IB driver"); 68MODULE_DESCRIPTION("Intel IB driver");
68MODULE_VERSION(QIB_DRIVER_VERSION); 69MODULE_VERSION(QIB_DRIVER_VERSION);
69 70
70/* 71/*
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a099ac171e22..0232ae56b1fa 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013 Intel Corporation. All rights reserved.
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. 3 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved. 4 * All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 5 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
@@ -51,7 +52,7 @@ static u32 qib_6120_iblink_state(u64);
51 52
52/* 53/*
53 * This file contains all the chip-specific register information and 54 * This file contains all the chip-specific register information and
54 * access functions for the QLogic QLogic_IB PCI-Express chip. 55 * access functions for the Intel Intel_IB PCI-Express chip.
55 * 56 *
56 */ 57 */
57 58
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 50e33aa0b4e3..173f805790da 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2012 Intel Corporation. All rights reserved. 2 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. 3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 * 5 *
@@ -1138,7 +1138,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
1138static void qib_remove_one(struct pci_dev *); 1138static void qib_remove_one(struct pci_dev *);
1139static int qib_init_one(struct pci_dev *, const struct pci_device_id *); 1139static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
1140 1140
1141#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: " 1141#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
1142#define PFX QIB_DRV_NAME ": " 1142#define PFX QIB_DRV_NAME ": "
1143 1143
1144static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = { 1144static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
@@ -1355,7 +1355,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1355 dd = qib_init_iba6120_funcs(pdev, ent); 1355 dd = qib_init_iba6120_funcs(pdev, ent);
1356#else 1356#else
1357 qib_early_err(&pdev->dev, 1357 qib_early_err(&pdev->dev,
1358 "QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n", 1358 "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
1359 ent->device); 1359 ent->device);
1360 dd = ERR_PTR(-ENODEV); 1360 dd = ERR_PTR(-ENODEV);
1361#endif 1361#endif
@@ -1371,7 +1371,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1371 1371
1372 default: 1372 default:
1373 qib_early_err(&pdev->dev, 1373 qib_early_err(&pdev->dev,
1374 "Failing on unknown QLogic deviceid 0x%x\n", 1374 "Failing on unknown Intel deviceid 0x%x\n",
1375 ent->device); 1375 ent->device);
1376 ret = -ENODEV; 1376 ret = -ENODEV;
1377 } 1377 }
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 50a8a0d4fe67..911205d3d5a0 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2012 Intel Corporation. All rights reserved. 2 * Copyright (c) 2013 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. 3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 * 5 *
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index ba51a4715a1d..7c0ab16a2fe2 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2012 Intel Corporation. All rights reserved. 2 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. 3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 * 5 *
@@ -2224,7 +2224,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
2224 ibdev->dma_ops = &qib_dma_mapping_ops; 2224 ibdev->dma_ops = &qib_dma_mapping_ops;
2225 2225
2226 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), 2226 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
2227 "QLogic Infiniband HCA %s", init_utsname()->nodename); 2227 "Intel Infiniband HCA %s", init_utsname()->nodename);
2228 2228
2229 ret = ib_register_device(ibdev, qib_create_port_files); 2229 ret = ib_register_device(ibdev, qib_create_port_files);
2230 if (ret) 2230 if (ret)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 67b0c1d23678..1ef880de3a41 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -758,9 +758,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
758 if (++priv->tx_outstanding == ipoib_sendq_size) { 758 if (++priv->tx_outstanding == ipoib_sendq_size) {
759 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", 759 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
760 tx->qp->qp_num); 760 tx->qp->qp_num);
761 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
762 ipoib_warn(priv, "request notify on send CQ failed\n");
763 netif_stop_queue(dev); 761 netif_stop_queue(dev);
762 rc = ib_req_notify_cq(priv->send_cq,
763 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
764 if (rc < 0)
765 ipoib_warn(priv, "request notify on send CQ failed\n");
766 else if (rc)
767 ipoib_send_comp_handler(priv->send_cq, dev);
764 } 768 }
765 } 769 }
766} 770}
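The ipoib hunk above reorders the TX-full path: stop the queue first, then re-arm the send CQ with IB_CQ_REPORT_MISSED_EVENTS, and if the re-arm reports missed completions run the completion handler by hand, since nothing else would wake the stopped queue. A kernel-style sketch of that pattern with placeholder names (foo_priv, foo_send_comp_handler):

#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>

struct foo_priv {
	struct ib_cq *send_cq;
};

static void foo_send_comp_handler(struct ib_cq *cq, struct net_device *dev)
{
	/* placeholder: the real handler polls the CQ and frees TX slots */
}

static void foo_stop_and_rearm_tx(struct net_device *dev,
				  struct foo_priv *priv)
{
	int rc;

	netif_stop_queue(dev);

	rc = ib_req_notify_cq(priv->send_cq,
			      IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc < 0)
		pr_warn("request notify on send CQ failed\n");
	else if (rc)
		/* completions raced with the re-arm: drain them now,
		 * otherwise nothing will wake the stopped queue */
		foo_send_comp_handler(priv->send_cq, dev);
}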
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 5c514d0711d1..c332fb98480d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -130,7 +130,7 @@ config IRQ_REMAP
130# OMAP IOMMU support 130# OMAP IOMMU support
131config OMAP_IOMMU 131config OMAP_IOMMU
132 bool "OMAP IOMMU Support" 132 bool "OMAP IOMMU Support"
133 depends on ARCH_OMAP 133 depends on ARCH_OMAP2PLUS
134 select IOMMU_API 134 select IOMMU_API
135 135
136config OMAP_IOVMM 136config OMAP_IOVMM
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 98f555dafb55..b287ca33833d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2466,18 +2466,16 @@ static int device_change_notifier(struct notifier_block *nb,
2466 2466
2467 /* allocate a protection domain if a device is added */ 2467 /* allocate a protection domain if a device is added */
2468 dma_domain = find_protection_domain(devid); 2468 dma_domain = find_protection_domain(devid);
2469 if (dma_domain) 2469 if (!dma_domain) {
2470 goto out; 2470 dma_domain = dma_ops_domain_alloc();
2471 dma_domain = dma_ops_domain_alloc(); 2471 if (!dma_domain)
2472 if (!dma_domain) 2472 goto out;
2473 goto out; 2473 dma_domain->target_dev = devid;
2474 dma_domain->target_dev = devid; 2474
2475 2475 spin_lock_irqsave(&iommu_pd_list_lock, flags);
2476 spin_lock_irqsave(&iommu_pd_list_lock, flags); 2476 list_add_tail(&dma_domain->list, &iommu_pd_list);
2477 list_add_tail(&dma_domain->list, &iommu_pd_list); 2477 spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
2478 spin_unlock_irqrestore(&iommu_pd_list_lock, flags); 2478 }
2479
2480 dev_data = get_dev_data(dev);
2481 2479
2482 dev->archdata.dma_ops = &amd_iommu_dma_ops; 2480 dev->archdata.dma_ops = &amd_iommu_dma_ops;
2483 2481
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index b6ecddb63cd0..e3c2d74b7684 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -980,7 +980,7 @@ static void __init free_iommu_all(void)
 980 * BIOS should disable L2B miscellaneous clock gating by setting 980 * BIOS should disable L2B miscellaneous clock gating by setting
981 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b 981 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
982 */ 982 */
983static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) 983static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
984{ 984{
985 u32 value; 985 u32 value;
986 986
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index d56f8c17c5fe..7c11ff368d07 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -2,7 +2,6 @@
2#include <linux/cpumask.h> 2#include <linux/cpumask.h>
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/string.h> 4#include <linux/string.h>
5#include <linux/cpumask.h>
6#include <linux/errno.h> 5#include <linux/errno.h>
7#include <linux/msi.h> 6#include <linux/msi.h>
8#include <linux/irq.h> 7#include <linux/irq.h>
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 66120bd46d15..10744091e6ca 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -6,6 +6,7 @@
6 6
7#include "dm.h" 7#include "dm.h"
8#include "dm-bio-prison.h" 8#include "dm-bio-prison.h"
9#include "dm-bio-record.h"
9#include "dm-cache-metadata.h" 10#include "dm-cache-metadata.h"
10 11
11#include <linux/dm-io.h> 12#include <linux/dm-io.h>
@@ -201,10 +202,15 @@ struct per_bio_data {
201 unsigned req_nr:2; 202 unsigned req_nr:2;
202 struct dm_deferred_entry *all_io_entry; 203 struct dm_deferred_entry *all_io_entry;
203 204
204 /* writethrough fields */ 205 /*
206 * writethrough fields. These MUST remain at the end of this
207 * structure and the 'cache' member must be the first as it
 208 * is used to determine the offset of the writethrough fields.
209 */
205 struct cache *cache; 210 struct cache *cache;
206 dm_cblock_t cblock; 211 dm_cblock_t cblock;
207 bio_end_io_t *saved_bi_end_io; 212 bio_end_io_t *saved_bi_end_io;
213 struct dm_bio_details bio_details;
208}; 214};
209 215
210struct dm_cache_migration { 216struct dm_cache_migration {
@@ -513,16 +519,28 @@ static void save_stats(struct cache *cache)
513/*---------------------------------------------------------------- 519/*----------------------------------------------------------------
514 * Per bio data 520 * Per bio data
515 *--------------------------------------------------------------*/ 521 *--------------------------------------------------------------*/
516static struct per_bio_data *get_per_bio_data(struct bio *bio) 522
523/*
524 * If using writeback, leave out struct per_bio_data's writethrough fields.
525 */
526#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
527#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
528
529static size_t get_per_bio_data_size(struct cache *cache)
530{
531 return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
532}
533
534static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
517{ 535{
518 struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); 536 struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
519 BUG_ON(!pb); 537 BUG_ON(!pb);
520 return pb; 538 return pb;
521} 539}
522 540
523static struct per_bio_data *init_per_bio_data(struct bio *bio) 541static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
524{ 542{
525 struct per_bio_data *pb = get_per_bio_data(bio); 543 struct per_bio_data *pb = get_per_bio_data(bio, data_size);
526 544
527 pb->tick = false; 545 pb->tick = false;
528 pb->req_nr = dm_bio_get_target_bio_nr(bio); 546 pb->req_nr = dm_bio_get_target_bio_nr(bio);
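The PB_DATA_SIZE_WB/PB_DATA_SIZE_WT definitions above rely on the layout rule spelled out in the comment: the writethrough-only members sit at the end of struct per_bio_data and 'cache' is the first of them, so offsetof(struct per_bio_data, cache) is exactly the per-bio size needed in writeback mode. A small runnable illustration with a stand-in structure:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for struct per_bio_data: shared fields first,
 * writethrough-only fields grouped at the end, 'cache' first of them. */
struct pb_like {
	unsigned long tick;
	unsigned int req_nr;
	/* writethrough-only fields from here on */
	void *cache;
	unsigned long cblock;
	void *saved_end_io;
};

#define PB_DATA_SIZE_WB	(offsetof(struct pb_like, cache))
#define PB_DATA_SIZE_WT	(sizeof(struct pb_like))

int main(void)
{
	printf("writeback per-bio size:    %zu bytes\n", PB_DATA_SIZE_WB);
	printf("writethrough per-bio size: %zu bytes\n", PB_DATA_SIZE_WT);
	return 0;
}

Allocating only PB_DATA_SIZE_WB per bio in writeback mode skips the trailing fields entirely, which is why the comment insists they stay at the end with 'cache' first.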
@@ -556,7 +574,8 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
556static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) 574static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
557{ 575{
558 unsigned long flags; 576 unsigned long flags;
559 struct per_bio_data *pb = get_per_bio_data(bio); 577 size_t pb_data_size = get_per_bio_data_size(cache);
578 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
560 579
561 spin_lock_irqsave(&cache->lock, flags); 580 spin_lock_irqsave(&cache->lock, flags);
562 if (cache->need_tick_bio && 581 if (cache->need_tick_bio &&
@@ -635,7 +654,7 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
635 654
636static void writethrough_endio(struct bio *bio, int err) 655static void writethrough_endio(struct bio *bio, int err)
637{ 656{
638 struct per_bio_data *pb = get_per_bio_data(bio); 657 struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
639 bio->bi_end_io = pb->saved_bi_end_io; 658 bio->bi_end_io = pb->saved_bi_end_io;
640 659
641 if (err) { 660 if (err) {
@@ -643,6 +662,7 @@ static void writethrough_endio(struct bio *bio, int err)
643 return; 662 return;
644 } 663 }
645 664
665 dm_bio_restore(&pb->bio_details, bio);
646 remap_to_cache(pb->cache, bio, pb->cblock); 666 remap_to_cache(pb->cache, bio, pb->cblock);
647 667
648 /* 668 /*
@@ -662,11 +682,12 @@ static void writethrough_endio(struct bio *bio, int err)
662static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, 682static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
663 dm_oblock_t oblock, dm_cblock_t cblock) 683 dm_oblock_t oblock, dm_cblock_t cblock)
664{ 684{
665 struct per_bio_data *pb = get_per_bio_data(bio); 685 struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
666 686
667 pb->cache = cache; 687 pb->cache = cache;
668 pb->cblock = cblock; 688 pb->cblock = cblock;
669 pb->saved_bi_end_io = bio->bi_end_io; 689 pb->saved_bi_end_io = bio->bi_end_io;
690 dm_bio_record(&pb->bio_details, bio);
670 bio->bi_end_io = writethrough_endio; 691 bio->bi_end_io = writethrough_endio;
671 692
672 remap_to_origin_clear_discard(pb->cache, bio, oblock); 693 remap_to_origin_clear_discard(pb->cache, bio, oblock);
@@ -1035,7 +1056,8 @@ static void defer_bio(struct cache *cache, struct bio *bio)
1035 1056
1036static void process_flush_bio(struct cache *cache, struct bio *bio) 1057static void process_flush_bio(struct cache *cache, struct bio *bio)
1037{ 1058{
1038 struct per_bio_data *pb = get_per_bio_data(bio); 1059 size_t pb_data_size = get_per_bio_data_size(cache);
1060 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1039 1061
1040 BUG_ON(bio->bi_size); 1062 BUG_ON(bio->bi_size);
1041 if (!pb->req_nr) 1063 if (!pb->req_nr)
@@ -1107,7 +1129,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
1107 dm_oblock_t block = get_bio_block(cache, bio); 1129 dm_oblock_t block = get_bio_block(cache, bio);
1108 struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; 1130 struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
1109 struct policy_result lookup_result; 1131 struct policy_result lookup_result;
1110 struct per_bio_data *pb = get_per_bio_data(bio); 1132 size_t pb_data_size = get_per_bio_data_size(cache);
1133 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1111 bool discarded_block = is_discarded_oblock(cache, block); 1134 bool discarded_block = is_discarded_oblock(cache, block);
1112 bool can_migrate = discarded_block || spare_migration_bandwidth(cache); 1135 bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
1113 1136
@@ -1881,7 +1904,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
1881 1904
1882 cache->ti = ca->ti; 1905 cache->ti = ca->ti;
1883 ti->private = cache; 1906 ti->private = cache;
1884 ti->per_bio_data_size = sizeof(struct per_bio_data);
1885 ti->num_flush_bios = 2; 1907 ti->num_flush_bios = 2;
1886 ti->flush_supported = true; 1908 ti->flush_supported = true;
1887 1909
@@ -1890,6 +1912,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
1890 ti->discard_zeroes_data_unsupported = true; 1912 ti->discard_zeroes_data_unsupported = true;
1891 1913
1892 memcpy(&cache->features, &ca->features, sizeof(cache->features)); 1914 memcpy(&cache->features, &ca->features, sizeof(cache->features));
1915 ti->per_bio_data_size = get_per_bio_data_size(cache);
1893 1916
1894 cache->callbacks.congested_fn = cache_is_congested; 1917 cache->callbacks.congested_fn = cache_is_congested;
1895 dm_table_add_target_callbacks(ti->table, &cache->callbacks); 1918 dm_table_add_target_callbacks(ti->table, &cache->callbacks);
@@ -2092,6 +2115,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
2092 2115
2093 int r; 2116 int r;
2094 dm_oblock_t block = get_bio_block(cache, bio); 2117 dm_oblock_t block = get_bio_block(cache, bio);
2118 size_t pb_data_size = get_per_bio_data_size(cache);
2095 bool can_migrate = false; 2119 bool can_migrate = false;
2096 bool discarded_block; 2120 bool discarded_block;
2097 struct dm_bio_prison_cell *cell; 2121 struct dm_bio_prison_cell *cell;
@@ -2108,7 +2132,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
2108 return DM_MAPIO_REMAPPED; 2132 return DM_MAPIO_REMAPPED;
2109 } 2133 }
2110 2134
2111 pb = init_per_bio_data(bio); 2135 pb = init_per_bio_data(bio, pb_data_size);
2112 2136
2113 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { 2137 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
2114 defer_bio(cache, bio); 2138 defer_bio(cache, bio);
@@ -2193,7 +2217,8 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
2193{ 2217{
2194 struct cache *cache = ti->private; 2218 struct cache *cache = ti->private;
2195 unsigned long flags; 2219 unsigned long flags;
2196 struct per_bio_data *pb = get_per_bio_data(bio); 2220 size_t pb_data_size = get_per_bio_data_size(cache);
2221 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
2197 2222
2198 if (pb->tick) { 2223 if (pb->tick) {
2199 policy_tick(cache->policy); 2224 policy_tick(cache->policy);
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index d4e7567b367c..0b899cb6cda1 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -724,7 +724,7 @@ static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
724 if (enable) { 724 if (enable) {
725 if (is_code(code, M5MOLS_RESTYPE_MONITOR)) 725 if (is_code(code, M5MOLS_RESTYPE_MONITOR))
726 ret = m5mols_start_monitor(info); 726 ret = m5mols_start_monitor(info);
727 if (is_code(code, M5MOLS_RESTYPE_CAPTURE)) 727 else if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
728 ret = m5mols_start_capture(info); 728 ret = m5mols_start_capture(info);
729 else 729 else
730 ret = -EINVAL; 730 ret = -EINVAL;
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index ccd18e4ee789..54579e4c740b 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -250,17 +250,19 @@ static u8 SRAM_Table[][60] =
250 vdelay start of active video in 2 * field lines relative to 250 vdelay start of active video in 2 * field lines relative to
251 trailing edge of /VRESET pulse (VDELAY register). 251 trailing edge of /VRESET pulse (VDELAY register).
252 sheight height of active video in 2 * field lines. 252 sheight height of active video in 2 * field lines.
253 extraheight Added to sheight for cropcap.bounds.height only
253 videostart0 ITU-R frame line number of the line corresponding 254 videostart0 ITU-R frame line number of the line corresponding
254 to vdelay in the first field. */ 255 to vdelay in the first field. */
255#define CROPCAP(minhdelayx1, hdelayx1, swidth, totalwidth, sqwidth, \ 256#define CROPCAP(minhdelayx1, hdelayx1, swidth, totalwidth, sqwidth, \
256 vdelay, sheight, videostart0) \ 257 vdelay, sheight, extraheight, videostart0) \
257 .cropcap.bounds.left = minhdelayx1, \ 258 .cropcap.bounds.left = minhdelayx1, \
258 /* * 2 because vertically we count field lines times two, */ \ 259 /* * 2 because vertically we count field lines times two, */ \
259 /* e.g. 23 * 2 to 23 * 2 + 576 in PAL-BGHI defrect. */ \ 260 /* e.g. 23 * 2 to 23 * 2 + 576 in PAL-BGHI defrect. */ \
260 .cropcap.bounds.top = (videostart0) * 2 - (vdelay) + MIN_VDELAY, \ 261 .cropcap.bounds.top = (videostart0) * 2 - (vdelay) + MIN_VDELAY, \
261 /* 4 is a safety margin at the end of the line. */ \ 262 /* 4 is a safety margin at the end of the line. */ \
262 .cropcap.bounds.width = (totalwidth) - (minhdelayx1) - 4, \ 263 .cropcap.bounds.width = (totalwidth) - (minhdelayx1) - 4, \
263 .cropcap.bounds.height = (sheight) + (vdelay) - MIN_VDELAY, \ 264 .cropcap.bounds.height = (sheight) + (extraheight) + (vdelay) - \
265 MIN_VDELAY, \
264 .cropcap.defrect.left = hdelayx1, \ 266 .cropcap.defrect.left = hdelayx1, \
265 .cropcap.defrect.top = (videostart0) * 2, \ 267 .cropcap.defrect.top = (videostart0) * 2, \
266 .cropcap.defrect.width = swidth, \ 268 .cropcap.defrect.width = swidth, \
@@ -301,9 +303,10 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
301 /* totalwidth */ 1135, 303 /* totalwidth */ 1135,
302 /* sqwidth */ 944, 304 /* sqwidth */ 944,
303 /* vdelay */ 0x20, 305 /* vdelay */ 0x20,
304 /* bt878 (and bt848?) can capture another 306 /* sheight */ 576,
305 line below active video. */ 307 /* bt878 (and bt848?) can capture another
306 /* sheight */ (576 + 2) + 0x20 - 2, 308 line below active video. */
309 /* extraheight */ 2,
307 /* videostart0 */ 23) 310 /* videostart0 */ 23)
308 },{ 311 },{
309 .v4l2_id = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR, 312 .v4l2_id = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR,
@@ -330,6 +333,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
330 /* sqwidth */ 780, 333 /* sqwidth */ 780,
331 /* vdelay */ 0x1a, 334 /* vdelay */ 0x1a,
332 /* sheight */ 480, 335 /* sheight */ 480,
336 /* extraheight */ 0,
333 /* videostart0 */ 23) 337 /* videostart0 */ 23)
334 },{ 338 },{
335 .v4l2_id = V4L2_STD_SECAM, 339 .v4l2_id = V4L2_STD_SECAM,
@@ -355,6 +359,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
355 /* sqwidth */ 944, 359 /* sqwidth */ 944,
356 /* vdelay */ 0x20, 360 /* vdelay */ 0x20,
357 /* sheight */ 576, 361 /* sheight */ 576,
362 /* extraheight */ 0,
358 /* videostart0 */ 23) 363 /* videostart0 */ 23)
359 },{ 364 },{
360 .v4l2_id = V4L2_STD_PAL_Nc, 365 .v4l2_id = V4L2_STD_PAL_Nc,
@@ -380,6 +385,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
380 /* sqwidth */ 780, 385 /* sqwidth */ 780,
381 /* vdelay */ 0x1a, 386 /* vdelay */ 0x1a,
382 /* sheight */ 576, 387 /* sheight */ 576,
388 /* extraheight */ 0,
383 /* videostart0 */ 23) 389 /* videostart0 */ 23)
384 },{ 390 },{
385 .v4l2_id = V4L2_STD_PAL_M, 391 .v4l2_id = V4L2_STD_PAL_M,
@@ -405,6 +411,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
405 /* sqwidth */ 780, 411 /* sqwidth */ 780,
406 /* vdelay */ 0x1a, 412 /* vdelay */ 0x1a,
407 /* sheight */ 480, 413 /* sheight */ 480,
414 /* extraheight */ 0,
408 /* videostart0 */ 23) 415 /* videostart0 */ 23)
409 },{ 416 },{
410 .v4l2_id = V4L2_STD_PAL_N, 417 .v4l2_id = V4L2_STD_PAL_N,
@@ -430,6 +437,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
430 /* sqwidth */ 944, 437 /* sqwidth */ 944,
431 /* vdelay */ 0x20, 438 /* vdelay */ 0x20,
432 /* sheight */ 576, 439 /* sheight */ 576,
440 /* extraheight */ 0,
433 /* videostart0 */ 23) 441 /* videostart0 */ 23)
434 },{ 442 },{
435 .v4l2_id = V4L2_STD_NTSC_M_JP, 443 .v4l2_id = V4L2_STD_NTSC_M_JP,
@@ -455,6 +463,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
455 /* sqwidth */ 780, 463 /* sqwidth */ 780,
456 /* vdelay */ 0x16, 464 /* vdelay */ 0x16,
457 /* sheight */ 480, 465 /* sheight */ 480,
466 /* extraheight */ 0,
458 /* videostart0 */ 23) 467 /* videostart0 */ 23)
459 },{ 468 },{
460 /* that one hopefully works with the strange timing 469 /* that one hopefully works with the strange timing
@@ -484,6 +493,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
484 /* sqwidth */ 944, 493 /* sqwidth */ 944,
485 /* vdelay */ 0x1a, 494 /* vdelay */ 0x1a,
486 /* sheight */ 480, 495 /* sheight */ 480,
496 /* extraheight */ 0,
487 /* videostart0 */ 23) 497 /* videostart0 */ 23)
488 } 498 }
489}; 499};
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 05d7b6333461..a0639e779973 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -204,7 +204,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
204 204
205config VIDEO_SH_VEU 205config VIDEO_SH_VEU
206 tristate "SuperH VEU mem2mem video processing driver" 206 tristate "SuperH VEU mem2mem video processing driver"
207 depends on VIDEO_DEV && VIDEO_V4L2 207 depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS
208 select VIDEOBUF2_DMA_CONTIG 208 select VIDEOBUF2_DMA_CONTIG
209 select V4L2_MEM2MEM_DEV 209 select V4L2_MEM2MEM_DEV
210 help 210 help
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 82d9f6ac12f3..33b5ffc8d66d 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -1054,16 +1054,18 @@ static int gsc_m2m_suspend(struct gsc_dev *gsc)
1054 1054
1055static int gsc_m2m_resume(struct gsc_dev *gsc) 1055static int gsc_m2m_resume(struct gsc_dev *gsc)
1056{ 1056{
1057 struct gsc_ctx *ctx;
1057 unsigned long flags; 1058 unsigned long flags;
1058 1059
1059 spin_lock_irqsave(&gsc->slock, flags); 1060 spin_lock_irqsave(&gsc->slock, flags);
1060 /* Clear for full H/W setup in first run after resume */ 1061 /* Clear for full H/W setup in first run after resume */
1062 ctx = gsc->m2m.ctx;
1061 gsc->m2m.ctx = NULL; 1063 gsc->m2m.ctx = NULL;
1062 spin_unlock_irqrestore(&gsc->slock, flags); 1064 spin_unlock_irqrestore(&gsc->slock, flags);
1063 1065
1064 if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state)) 1066 if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state))
1065 gsc_m2m_job_finish(gsc->m2m.ctx, 1067 gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
1066 VB2_BUF_STATE_ERROR); 1068
1067 return 0; 1069 return 0;
1068} 1070}
1069 1071
@@ -1204,7 +1206,7 @@ static int gsc_resume(struct device *dev)
1204 /* Do not resume if the device was idle before system suspend */ 1206 /* Do not resume if the device was idle before system suspend */
1205 spin_lock_irqsave(&gsc->slock, flags); 1207 spin_lock_irqsave(&gsc->slock, flags);
1206 if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) || 1208 if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) ||
1207 !gsc_m2m_active(gsc)) { 1209 !gsc_m2m_opened(gsc)) {
1208 spin_unlock_irqrestore(&gsc->slock, flags); 1210 spin_unlock_irqrestore(&gsc->slock, flags);
1209 return 0; 1211 return 0;
1210 } 1212 }
diff --git a/drivers/media/platform/s5p-fimc/fimc-core.c b/drivers/media/platform/s5p-fimc/fimc-core.c
index e3916bde45cf..0f513dd19f86 100644
--- a/drivers/media/platform/s5p-fimc/fimc-core.c
+++ b/drivers/media/platform/s5p-fimc/fimc-core.c
@@ -850,16 +850,18 @@ static int fimc_m2m_suspend(struct fimc_dev *fimc)
850 850
851static int fimc_m2m_resume(struct fimc_dev *fimc) 851static int fimc_m2m_resume(struct fimc_dev *fimc)
852{ 852{
853 struct fimc_ctx *ctx;
853 unsigned long flags; 854 unsigned long flags;
854 855
855 spin_lock_irqsave(&fimc->slock, flags); 856 spin_lock_irqsave(&fimc->slock, flags);
856 /* Clear for full H/W setup in first run after resume */ 857 /* Clear for full H/W setup in first run after resume */
858 ctx = fimc->m2m.ctx;
857 fimc->m2m.ctx = NULL; 859 fimc->m2m.ctx = NULL;
858 spin_unlock_irqrestore(&fimc->slock, flags); 860 spin_unlock_irqrestore(&fimc->slock, flags);
859 861
860 if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state)) 862 if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state))
861 fimc_m2m_job_finish(fimc->m2m.ctx, 863 fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
862 VB2_BUF_STATE_ERROR); 864
863 return 0; 865 return 0;
864} 866}
865 867
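Both m2m resume fixes above (exynos-gsc and s5p-fimc) follow the same ordering: snapshot the active context while holding the spinlock, clear the shared pointer, drop the lock, and pass the snapshot to the job-finish helper so it never sees the freshly cleared field. A stripped-down sketch of that ordering with placeholder types (the suspended-state test from the drivers is omitted):

#include <linux/spinlock.h>

struct m2m_like {
	spinlock_t slock;
	void *ctx;		/* currently running context, if any */
};

static void job_finish(void *ctx)
{
	/* placeholder for gsc_m2m_job_finish()/fimc_m2m_job_finish() */
}

static void m2m_resume_like(struct m2m_like *m2m)
{
	unsigned long flags;
	void *ctx;

	spin_lock_irqsave(&m2m->slock, flags);
	ctx = m2m->ctx;		/* snapshot before clearing */
	m2m->ctx = NULL;	/* force full H/W setup on the next run */
	spin_unlock_irqrestore(&m2m->slock, flags);

	if (ctx)
		job_finish(ctx);	/* uses the snapshot, not the cleared field */
}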
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite-reg.c b/drivers/media/platform/s5p-fimc/fimc-lite-reg.c
index f0af0754a7b4..ac9663ce2a49 100644
--- a/drivers/media/platform/s5p-fimc/fimc-lite-reg.c
+++ b/drivers/media/platform/s5p-fimc/fimc-lite-reg.c
@@ -128,10 +128,10 @@ static const u32 src_pixfmt_map[8][3] = {
128void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f) 128void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
129{ 129{
130 enum v4l2_mbus_pixelcode pixelcode = dev->fmt->mbus_code; 130 enum v4l2_mbus_pixelcode pixelcode = dev->fmt->mbus_code;
131 unsigned int i = ARRAY_SIZE(src_pixfmt_map); 131 int i = ARRAY_SIZE(src_pixfmt_map);
132 u32 cfg; 132 u32 cfg;
133 133
134 while (i-- >= 0) { 134 while (--i >= 0) {
135 if (src_pixfmt_map[i][0] == pixelcode) 135 if (src_pixfmt_map[i][0] == pixelcode)
136 break; 136 break;
137 } 137 }
@@ -224,9 +224,9 @@ static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
224 { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY }, 224 { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY },
225 }; 225 };
226 u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT); 226 u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
227 unsigned int i = ARRAY_SIZE(pixcode); 227 int i = ARRAY_SIZE(pixcode);
228 228
229 while (i-- >= 0) 229 while (--i >= 0)
230 if (pixcode[i][0] == dev->fmt->mbus_code) 230 if (pixcode[i][0] == dev->fmt->mbus_code)
231 break; 231 break;
232 cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK; 232 cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK;
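The two loop fixes above switch the index from unsigned int to int and the test from "i-- >= 0" to "--i >= 0". With an unsigned index the ">= 0" test can never fail, so when no table entry matches, the index wraps from 0 to UINT_MAX and the loop body reads far outside the array. A short runnable comparison of the fixed form (table contents are invented):

#include <stdio.h>

static const unsigned int map[3] = { 10, 20, 30 };
#define N ((int)(sizeof(map) / sizeof(map[0])))

int main(void)
{
	unsigned int wanted = 99;	/* not present in the table */
	int i = N;			/* signed index, as in the fix */

	/* Broken form: "unsigned int i = N; while (i-- >= 0)" can never
	 * fail the test, because an unsigned value is always >= 0.  When
	 * nothing matches, i wraps from 0 to UINT_MAX and the body reads
	 * far outside the table.  The fixed form pre-decrements a signed
	 * index, scans N-1 .. 0, and stops cleanly at -1. */
	while (--i >= 0)
		if (map[i] == wanted)
			break;

	if (i < 0)
		printf("no match, loop terminated with i = %d\n", i);
	else
		printf("match at index %d\n", i);
	return 0;
}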
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite.c b/drivers/media/platform/s5p-fimc/fimc-lite.c
index bfc4206935c8..bbc35de7db27 100644
--- a/drivers/media/platform/s5p-fimc/fimc-lite.c
+++ b/drivers/media/platform/s5p-fimc/fimc-lite.c
@@ -1408,6 +1408,7 @@ static const struct v4l2_ctrl_config fimc_lite_ctrl = {
1408 .id = V4L2_CTRL_CLASS_USER | 0x1001, 1408 .id = V4L2_CTRL_CLASS_USER | 0x1001,
1409 .type = V4L2_CTRL_TYPE_BOOLEAN, 1409 .type = V4L2_CTRL_TYPE_BOOLEAN,
1410 .name = "Test Pattern 640x480", 1410 .name = "Test Pattern 640x480",
1411 .step = 1,
1411}; 1412};
1412 1413
1413static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc) 1414static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc)
diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.c b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
index a17fcb2d5d41..cd38d708ab58 100644
--- a/drivers/media/platform/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
@@ -827,7 +827,7 @@ static int fimc_md_link_notify(struct media_pad *source,
827 struct fimc_pipeline *pipeline; 827 struct fimc_pipeline *pipeline;
828 struct v4l2_subdev *sd; 828 struct v4l2_subdev *sd;
829 struct mutex *lock; 829 struct mutex *lock;
830 int ret = 0; 830 int i, ret = 0;
831 int ref_count; 831 int ref_count;
832 832
833 if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV) 833 if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
@@ -854,29 +854,28 @@ static int fimc_md_link_notify(struct media_pad *source,
854 return 0; 854 return 0;
855 } 855 }
856 856
857 mutex_lock(lock);
858 ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count;
859
857 if (!(flags & MEDIA_LNK_FL_ENABLED)) { 860 if (!(flags & MEDIA_LNK_FL_ENABLED)) {
858 int i; 861 if (ref_count > 0) {
859 mutex_lock(lock); 862 ret = __fimc_pipeline_close(pipeline);
860 ret = __fimc_pipeline_close(pipeline); 863 if (!ret && fimc)
864 fimc_ctrls_delete(fimc->vid_cap.ctx);
865 }
861 for (i = 0; i < IDX_MAX; i++) 866 for (i = 0; i < IDX_MAX; i++)
862 pipeline->subdevs[i] = NULL; 867 pipeline->subdevs[i] = NULL;
863 if (fimc) 868 } else if (ref_count > 0) {
864 fimc_ctrls_delete(fimc->vid_cap.ctx); 869 /*
865 mutex_unlock(lock); 870 * Link activation. Enable power of pipeline elements only if
866 return ret; 871 * the pipeline is already in use, i.e. its video node is open.
872 * Recreate the controls destroyed during the link deactivation.
873 */
874 ret = __fimc_pipeline_open(pipeline,
875 source->entity, true);
876 if (!ret && fimc)
877 ret = fimc_capture_ctrls_create(fimc);
867 } 878 }
868 /*
869 * Link activation. Enable power of pipeline elements only if the
870 * pipeline is already in use, i.e. its video node is opened.
871 * Recreate the controls destroyed during the link deactivation.
872 */
873 mutex_lock(lock);
874
875 ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count;
876 if (ref_count > 0)
877 ret = __fimc_pipeline_open(pipeline, source->entity, true);
878 if (!ret && fimc)
879 ret = fimc_capture_ctrls_create(fimc);
880 879
881 mutex_unlock(lock); 880 mutex_unlock(lock);
882 return ret ? -EPIPE : ret; 881 return ret ? -EPIPE : ret;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index e84703c314ce..1cb6d57987c6 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -276,7 +276,7 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
276 unsigned int frame_type; 276 unsigned int frame_type;
277 277
278 dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev); 278 dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
279 frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev); 279 frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_disp_frame_type, ctx);
280 280
281 /* If frame is same as previous then skip and do not dequeue */ 281 /* If frame is same as previous then skip and do not dequeue */
282 if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) { 282 if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 2356fd52a169..4f6b553c4b2d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -232,6 +232,7 @@ static struct mfc_control controls[] = {
232 .minimum = 0, 232 .minimum = 0,
233 .maximum = 1, 233 .maximum = 1,
234 .default_value = 0, 234 .default_value = 0,
235 .step = 1,
235 .menu_skip_mask = 0, 236 .menu_skip_mask = 0,
236 }, 237 },
237 { 238 {
diff --git a/drivers/media/radio/radio-ma901.c b/drivers/media/radio/radio-ma901.c
index c61f590029ad..348dafc0318a 100644
--- a/drivers/media/radio/radio-ma901.c
+++ b/drivers/media/radio/radio-ma901.c
@@ -347,9 +347,20 @@ static void usb_ma901radio_release(struct v4l2_device *v4l2_dev)
347static int usb_ma901radio_probe(struct usb_interface *intf, 347static int usb_ma901radio_probe(struct usb_interface *intf,
348 const struct usb_device_id *id) 348 const struct usb_device_id *id)
349{ 349{
350 struct usb_device *dev = interface_to_usbdev(intf);
350 struct ma901radio_device *radio; 351 struct ma901radio_device *radio;
351 int retval = 0; 352 int retval = 0;
352 353
 354 /* Masterkit MA901 usb radio has the same USB ID as many other
 355 * Atmel V-USB devices. Let's make additional checks to be sure
356 * that this is our device.
357 */
358
359 if (dev->product && dev->manufacturer &&
360 (strncmp(dev->product, "MA901", 5) != 0
361 || strncmp(dev->manufacturer, "www.masterkit.ru", 16) != 0))
362 return -ENODEV;
363
353 radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL); 364 radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL);
354 if (!radio) { 365 if (!radio) {
355 dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n"); 366 dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n");
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 19f3563c61da..5a79c333d45e 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -291,7 +291,7 @@ config IR_TTUSBIR
291 291
292config IR_RX51 292config IR_RX51
293 tristate "Nokia N900 IR transmitter diode" 293 tristate "Nokia N900 IR transmitter diode"
294 depends on OMAP_DM_TIMER && LIRC && !ARCH_MULTIPLATFORM 294 depends on OMAP_DM_TIMER && ARCH_OMAP2PLUS && LIRC && !ARCH_MULTIPLATFORM
295 ---help--- 295 ---help---
296 Say Y or M here if you want to enable support for the IR 296 Say Y or M here if you want to enable support for the IR
297 transmitter diode built in the Nokia N900 (RX51) device. 297 transmitter diode built in the Nokia N900 (RX51) device.
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index a9d355230e8e..768aaf62d5dc 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -10,7 +10,7 @@ ifeq ($(CONFIG_COMPAT),y)
10 videodev-objs += v4l2-compat-ioctl32.o 10 videodev-objs += v4l2-compat-ioctl32.o
11endif 11endif
12 12
13obj-$(CONFIG_VIDEO_DEV) += videodev.o 13obj-$(CONFIG_VIDEO_V4L2) += videodev.o
14obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o 14obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
15obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o 15obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
16 16
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 45ea7185c003..642c6223fa6c 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -152,6 +152,20 @@ static void mei_me_intr_disable(struct mei_device *dev)
152} 152}
153 153
154/** 154/**
155 * mei_me_hw_reset_release - release device from the reset
156 *
157 * @dev: the device structure
158 */
159static void mei_me_hw_reset_release(struct mei_device *dev)
160{
161 struct mei_me_hw *hw = to_me_hw(dev);
162 u32 hcsr = mei_hcsr_read(hw);
163
164 hcsr |= H_IG;
165 hcsr &= ~H_RST;
166 mei_hcsr_set(hw, hcsr);
167}
168/**
155 * mei_me_hw_reset - resets fw via mei csr register. 169 * mei_me_hw_reset - resets fw via mei csr register.
156 * 170 *
157 * @dev: the device structure 171 * @dev: the device structure
@@ -169,18 +183,14 @@ static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
169 if (intr_enable) 183 if (intr_enable)
170 hcsr |= H_IE; 184 hcsr |= H_IE;
171 else 185 else
172 hcsr &= ~H_IE; 186 hcsr |= ~H_IE;
173
174 mei_hcsr_set(hw, hcsr);
175
176 hcsr = mei_hcsr_read(hw) | H_IG;
177 hcsr &= ~H_RST;
178 187
179 mei_hcsr_set(hw, hcsr); 188 mei_hcsr_set(hw, hcsr);
180 189
181 hcsr = mei_hcsr_read(hw); 190 if (dev->dev_state == MEI_DEV_POWER_DOWN)
191 mei_me_hw_reset_release(dev);
182 192
183 dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr); 193 dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", mei_hcsr_read(hw));
184} 194}
185 195
186/** 196/**
@@ -466,7 +476,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
466 mutex_unlock(&dev->device_lock); 476 mutex_unlock(&dev->device_lock);
467 return IRQ_HANDLED; 477 return IRQ_HANDLED;
468 } else { 478 } else {
469 dev_dbg(&dev->pdev->dev, "FW not ready.\n"); 479 dev_dbg(&dev->pdev->dev, "Reset Completed.\n");
480 mei_me_hw_reset_release(dev);
470 mutex_unlock(&dev->device_lock); 481 mutex_unlock(&dev->device_lock);
471 return IRQ_HANDLED; 482 return IRQ_HANDLED;
472 } 483 }
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 6ec530168afb..356179991a2e 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -183,6 +183,24 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
 	mei_cl_all_write_clear(dev);
 }
 
+void mei_stop(struct mei_device *dev)
+{
+	dev_dbg(&dev->pdev->dev, "stopping the device.\n");
+
+	mutex_lock(&dev->device_lock);
+
+	cancel_delayed_work(&dev->timer_work);
+
+	mei_wd_stop(dev);
+
+	dev->dev_state = MEI_DEV_POWER_DOWN;
+	mei_reset(dev, 0);
+
+	mutex_unlock(&dev->device_lock);
+
+	flush_scheduled_work();
+}
+
 
 
 
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index cb80166161f0..97873812e33b 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -381,6 +381,7 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
 void mei_device_init(struct mei_device *dev);
 void mei_reset(struct mei_device *dev, int interrupts);
 int mei_hw_init(struct mei_device *dev);
+void mei_stop(struct mei_device *dev);
 
 /*
  * MEI interrupt functions prototype
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index b40ec0601ab0..b8b5c9c3ad03 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -247,44 +247,14 @@ static void mei_remove(struct pci_dev *pdev)
 
 	hw = to_me_hw(dev);
 
-	mutex_lock(&dev->device_lock);
-
-	cancel_delayed_work(&dev->timer_work);
 
-	mei_wd_stop(dev);
+	dev_err(&pdev->dev, "stop\n");
+	mei_stop(dev);
 
 	mei_pdev = NULL;
 
-	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
-		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
-		mei_cl_disconnect(&dev->iamthif_cl);
-	}
-	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
-		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
-		mei_cl_disconnect(&dev->wd_cl);
-	}
-
-	/* Unregistering watchdog device */
 	mei_watchdog_unregister(dev);
 
-	/* remove entry if already in list */
-	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
-
-	if (dev->open_handle_count > 0)
-		dev->open_handle_count--;
-	mei_cl_unlink(&dev->wd_cl);
-
-	if (dev->open_handle_count > 0)
-		dev->open_handle_count--;
-	mei_cl_unlink(&dev->iamthif_cl);
-
-	dev->iamthif_current_cb = NULL;
-	dev->me_clients_num = 0;
-
-	mutex_unlock(&dev->device_lock);
-
-	flush_scheduled_work();
-
 	/* disable interrupts */
 	mei_disable_interrupts(dev);
 
@@ -308,28 +278,20 @@ static int mei_pci_suspend(struct device *device)
 {
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct mei_device *dev = pci_get_drvdata(pdev);
-	int err;
 
 	if (!dev)
 		return -ENODEV;
-	mutex_lock(&dev->device_lock);
 
-	cancel_delayed_work(&dev->timer_work);
+	dev_err(&pdev->dev, "suspend\n");
 
-	/* Stop watchdog if exists */
-	err = mei_wd_stop(dev);
-	/* Set new mei state */
-	if (dev->dev_state == MEI_DEV_ENABLED ||
-	    dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
-		dev->dev_state = MEI_DEV_POWER_DOWN;
-		mei_reset(dev, 0);
-	}
-	mutex_unlock(&dev->device_lock);
+	mei_stop(dev);
+
+	mei_disable_interrupts(dev);
 
 	free_irq(pdev->irq, dev);
 	pci_disable_msi(pdev);
 
-	return err;
+	return 0;
 }
 
 static int mei_pci_resume(struct device *device)
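The two hunks above replace open-coded teardown in mei_remove() and mei_pci_suspend() with the mei_stop() helper introduced in init.c, so both paths share one locked shutdown sequence. As an illustration of the consolidation pattern only, not the MEI code itself, a hypothetical driver might factor its teardown like this (structure and field names are assumptions):

#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Hypothetical device: one helper owns the whole shutdown sequence so the
 * PCI .remove and .suspend callbacks cannot drift apart over time. */
struct foo_device {
	struct mutex lock;
	struct delayed_work timer_work;
	bool powered_down;
};

static void foo_stop(struct foo_device *fdev)
{
	mutex_lock(&fdev->lock);
	cancel_delayed_work(&fdev->timer_work);	/* stop periodic work */
	fdev->powered_down = true;		/* analogue of POWER_DOWN */
	mutex_unlock(&fdev->lock);
	flush_scheduled_work();			/* wait for anything in flight */
}

Both callers then shrink to a call to the helper plus whatever is caller-specific, such as freeing the IRQ or disabling MSI.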
diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig
index 39c2ecadb273..ea98f7e9ccd1 100644
--- a/drivers/misc/vmw_vmci/Kconfig
+++ b/drivers/misc/vmw_vmci/Kconfig
@@ -4,7 +4,7 @@
 
 config VMWARE_VMCI
 	tristate "VMware VMCI Driver"
-	depends on X86 && PCI
+	depends on X86 && PCI && NET
 	help
 	  This is VMware's Virtual Machine Communication Interface. It enables
 	  high-speed communication between host and guest in a virtual
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
index ed5c433cd493..f3cdd904fe4d 100644
--- a/drivers/misc/vmw_vmci/vmci_datagram.c
+++ b/drivers/misc/vmw_vmci/vmci_datagram.c
@@ -42,9 +42,11 @@ struct datagram_entry {
 
 struct delayed_datagram_info {
 	struct datagram_entry *entry;
-	struct vmci_datagram msg;
 	struct work_struct work;
 	bool in_dg_host_queue;
+	/* msg and msg_payload must be together. */
+	struct vmci_datagram msg;
+	u8 msg_payload[];
 };
 
 /* Number of in-flight host->host datagrams */
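The struct layout change above relies on a C99 flexible array member: the datagram header is moved to the end of the structure and msg_payload[] directly follows it, so a single allocation holds header plus payload contiguously. A small stand-alone sketch of the idiom (names here are invented, not VMCI's):

#include <stdlib.h>
#include <string.h>

struct dgram {				/* stand-in for the datagram header */
	unsigned long dst, src;
	unsigned long payload_size;
};

struct delayed_info {
	void *entry;
	int in_host_queue;
	/* header and payload must stay adjacent, so the header is last
	 * and the flexible array member directly follows it */
	struct dgram msg;
	unsigned char msg_payload[];
};

static struct delayed_info *make_info(const struct dgram *dg,
				      const void *payload, size_t len)
{
	struct delayed_info *di = malloc(sizeof(*di) + len);

	if (!di)
		return NULL;
	di->msg = *dg;
	memcpy(di->msg_payload, payload, len);	/* lands right after msg */
	return di;
}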
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6bbd90e1123c..07401a3e256b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1976,12 +1976,11 @@ static int __bond_release_one(struct net_device *bond_dev,
 		return -EINVAL;
 	}
 
+	write_unlock_bh(&bond->lock);
 	/* unregister rx_handler early so bond_handle_frame wouldn't be called
 	 * for this slave anymore.
 	 */
 	netdev_rx_handler_unregister(slave_dev);
-	write_unlock_bh(&bond->lock);
-	synchronize_net();
 	write_lock_bh(&bond->lock);
 
 	if (!all && !bond->params.fail_over_mac) {
@@ -4847,9 +4846,18 @@ static int __net_init bond_net_init(struct net *net)
 static void __net_exit bond_net_exit(struct net *net)
 {
 	struct bond_net *bn = net_generic(net, bond_net_id);
+	struct bonding *bond, *tmp_bond;
+	LIST_HEAD(list);
 
 	bond_destroy_sysfs(bn);
 	bond_destroy_proc_dir(bn);
+
+	/* Kill off any bonds created after unregistering bond rtnl ops */
+	rtnl_lock();
+	list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
+		unregister_netdevice_queue(bond->dev, &list);
+	unregister_netdevice_many(&list);
+	rtnl_unlock();
 }
 
 static struct pernet_operations bond_net_ops = {
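bond_net_exit() above batches the removal of leftover devices under a single rtnl_lock() using unregister_netdevice_queue() plus unregister_netdevice_many(). A hedged sketch of that batching pattern for a generic pernet exit hook (the per-device bookkeeping struct is hypothetical):

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

struct my_port {			/* hypothetical per-device record */
	struct net_device *dev;
	struct list_head node;
};

/* Sketch: queue every remaining device and tear them all down in one
 * batch, which amortizes the expensive synchronization steps. */
static void example_net_exit(struct list_head *port_list)
{
	struct my_port *p, *tmp;
	LIST_HEAD(kill_list);

	rtnl_lock();
	list_for_each_entry_safe(p, tmp, port_list, node)
		unregister_netdevice_queue(p->dev, &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}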
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 1c9e09fbdff8..ea7a388f4843 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -183,6 +183,11 @@ int bond_create_slave_symlinks(struct net_device *master,
 	sprintf(linkname, "slave_%s", slave->name);
 	ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
 				linkname);
+
+	/* free the master link created earlier in case of error */
+	if (ret)
+		sysfs_remove_link(&(slave->dev.kobj), "master");
+
 	return ret;
 
 }
@@ -522,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 		goto out;
 	}
 	if (new_value < 0) {
-		pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
+		pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
 		       bond->dev->name, new_value, INT_MAX);
 		ret = -EINVAL;
 		goto out;
@@ -537,14 +542,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 	pr_info("%s: Setting ARP monitoring interval to %d.\n",
 		bond->dev->name, new_value);
 	bond->params.arp_interval = new_value;
-	if (bond->params.miimon) {
-		pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
-			bond->dev->name, bond->dev->name);
-		bond->params.miimon = 0;
-	}
-	if (!bond->params.arp_targets[0]) {
-		pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
-			bond->dev->name);
+	if (new_value) {
+		if (bond->params.miimon) {
+			pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+				bond->dev->name, bond->dev->name);
+			bond->params.miimon = 0;
+		}
+		if (!bond->params.arp_targets[0])
+			pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+				bond->dev->name);
 	}
 	if (bond->dev->flags & IFF_UP) {
 		/* If the interface is up, we may need to fire off
@@ -552,10 +558,13 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 		 * timer will get fired off when the open function
 		 * is called.
 		 */
-		cancel_delayed_work_sync(&bond->mii_work);
-		queue_delayed_work(bond->wq, &bond->arp_work, 0);
+		if (!new_value) {
+			cancel_delayed_work_sync(&bond->arp_work);
+		} else {
+			cancel_delayed_work_sync(&bond->mii_work);
+			queue_delayed_work(bond->wq, &bond->arp_work, 0);
+		}
 	}
-
 out:
 	rtnl_unlock();
 	return ret;
@@ -697,7 +706,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
 	}
 	if (new_value < 0) {
 		pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
-		       bond->dev->name, new_value, 1, INT_MAX);
+		       bond->dev->name, new_value, 0, INT_MAX);
 		ret = -EINVAL;
 		goto out;
 	} else {
@@ -752,8 +761,8 @@ static ssize_t bonding_store_updelay(struct device *d,
 		goto out;
 	}
 	if (new_value < 0) {
-		pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
-		       bond->dev->name, new_value, 1, INT_MAX);
+		pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
+		       bond->dev->name, new_value, 0, INT_MAX);
 		ret = -EINVAL;
 		goto out;
 	} else {
@@ -963,37 +972,37 @@ static ssize_t bonding_store_miimon(struct device *d,
963 } 972 }
964 if (new_value < 0) { 973 if (new_value < 0) {
965 pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n", 974 pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
966 bond->dev->name, new_value, 1, INT_MAX); 975 bond->dev->name, new_value, 0, INT_MAX);
967 ret = -EINVAL; 976 ret = -EINVAL;
968 goto out; 977 goto out;
969 } else { 978 }
970 pr_info("%s: Setting MII monitoring interval to %d.\n", 979 pr_info("%s: Setting MII monitoring interval to %d.\n",
971 bond->dev->name, new_value); 980 bond->dev->name, new_value);
972 bond->params.miimon = new_value; 981 bond->params.miimon = new_value;
973 if (bond->params.updelay) 982 if (bond->params.updelay)
974 pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", 983 pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
975 bond->dev->name, 984 bond->dev->name,
976 bond->params.updelay * bond->params.miimon); 985 bond->params.updelay * bond->params.miimon);
977 if (bond->params.downdelay) 986 if (bond->params.downdelay)
978 pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", 987 pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
979 bond->dev->name, 988 bond->dev->name,
980 bond->params.downdelay * bond->params.miimon); 989 bond->params.downdelay * bond->params.miimon);
981 if (bond->params.arp_interval) { 990 if (new_value && bond->params.arp_interval) {
982 pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", 991 pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
983 bond->dev->name); 992 bond->dev->name);
984 bond->params.arp_interval = 0; 993 bond->params.arp_interval = 0;
985 if (bond->params.arp_validate) { 994 if (bond->params.arp_validate)
986 bond->params.arp_validate = 995 bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
987 BOND_ARP_VALIDATE_NONE; 996 }
988 } 997 if (bond->dev->flags & IFF_UP) {
989 } 998 /* If the interface is up, we may need to fire off
990 999 * the MII timer. If the interface is down, the
991 if (bond->dev->flags & IFF_UP) { 1000 * timer will get fired off when the open function
992 /* If the interface is up, we may need to fire off 1001 * is called.
993 * the MII timer. If the interface is down, the 1002 */
994 * timer will get fired off when the open function 1003 if (!new_value) {
995 * is called. 1004 cancel_delayed_work_sync(&bond->mii_work);
996 */ 1005 } else {
997 cancel_delayed_work_sync(&bond->arp_work); 1006 cancel_delayed_work_sync(&bond->arp_work);
998 queue_delayed_work(bond->wq, &bond->mii_work, 0); 1007 queue_delayed_work(bond->wq, &bond->mii_work, 0);
999 } 1008 }
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index b39ca5b3ea7f..ff2ba86cd4a4 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -46,6 +46,7 @@ config CAN_EMS_PCI
 config CAN_PEAK_PCMCIA
 	tristate "PEAK PCAN-PC Card"
 	depends on PCMCIA
+	depends on HAS_IOPORT
 	---help---
 	  This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
 	  from PEAK-System (http://www.peak-system.com). To compile this
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index a042cdc260dc..3c18d7d000ed 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
 	 */
 	if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
 	    REG_CR_BASICCAN_INITIAL &&
-	    (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
+	    (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) &&
 	    (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
 		flag = 1;
 
@@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
 	 * See states on p. 23 of the Datasheet.
 	 */
 	if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
-	    priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
+	    priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL &&
 	    priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
 		return flag;
 
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index daf4013a8fc7..e4df307eaa90 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -92,7 +92,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
 	 */
 	spin_lock_irqsave(&priv->cmdreg_lock, flags);
 	priv->write_reg(priv, REG_CMR, val);
-	priv->read_reg(priv, REG_SR);
+	priv->read_reg(priv, SJA1000_REG_SR);
 	spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
 }
 
@@ -502,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
 
 	while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
 		n++;
-		status = priv->read_reg(priv, REG_SR);
+		status = priv->read_reg(priv, SJA1000_REG_SR);
 		/* check for absent controller due to hw unplug */
 		if (status == 0xFF && sja1000_is_absent(priv))
 			return IRQ_NONE;
@@ -530,7 +530,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
 			/* receive interrupt */
 			while (status & SR_RBS) {
 				sja1000_rx(dev);
-				status = priv->read_reg(priv, REG_SR);
+				status = priv->read_reg(priv, SJA1000_REG_SR);
 				/* check for absent controller */
 				if (status == 0xFF && sja1000_is_absent(priv))
 					return IRQ_NONE;
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index afa99847a510..aa48e053da27 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -56,7 +56,7 @@
 /* SJA1000 registers - manual section 6.4 (Pelican Mode) */
 #define REG_MOD 0x00
 #define REG_CMR 0x01
-#define REG_SR 0x02
+#define SJA1000_REG_SR 0x02
 #define REG_IR 0x03
 #define REG_IER 0x04
 #define REG_ALC 0x0B
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index 829b5ad71d0d..b5fd934585e9 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -186,7 +186,7 @@ struct atl1e_tpd_desc {
 /* how about 0x2000 */
 #define MAX_TX_BUF_LEN 0x2000
 #define MAX_TX_BUF_SHIFT 13
-/*#define MAX_TX_BUF_LEN 0x3000 */
+#define MAX_TSO_SEG_SIZE 0x3c00
 
 /* rrs word 1 bit 0:31 */
 #define RRS_RX_CSUM_MASK 0xFFFF
@@ -438,7 +438,6 @@ struct atl1e_adapter {
 	struct atl1e_hw hw;
 	struct atl1e_hw_stats hw_stats;
 
-	bool have_msi;
 	u32 wol;
 	u16 link_speed;
 	u16 link_duplex;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 92f4734f860d..ac25f05ff68f 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1849,34 +1849,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)
1849 struct net_device *netdev = adapter->netdev; 1849 struct net_device *netdev = adapter->netdev;
1850 1850
1851 free_irq(adapter->pdev->irq, netdev); 1851 free_irq(adapter->pdev->irq, netdev);
1852
1853 if (adapter->have_msi)
1854 pci_disable_msi(adapter->pdev);
1855} 1852}
1856 1853
1857static int atl1e_request_irq(struct atl1e_adapter *adapter) 1854static int atl1e_request_irq(struct atl1e_adapter *adapter)
1858{ 1855{
1859 struct pci_dev *pdev = adapter->pdev; 1856 struct pci_dev *pdev = adapter->pdev;
1860 struct net_device *netdev = adapter->netdev; 1857 struct net_device *netdev = adapter->netdev;
1861 int flags = 0;
1862 int err = 0; 1858 int err = 0;
1863 1859
1864 adapter->have_msi = true; 1860 err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name,
1865 err = pci_enable_msi(pdev); 1861 netdev);
1866 if (err) {
1867 netdev_dbg(netdev,
1868 "Unable to allocate MSI interrupt Error: %d\n", err);
1869 adapter->have_msi = false;
1870 }
1871
1872 if (!adapter->have_msi)
1873 flags |= IRQF_SHARED;
1874 err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
1875 if (err) { 1862 if (err) {
1876 netdev_dbg(adapter->netdev, 1863 netdev_dbg(adapter->netdev,
1877 "Unable to allocate interrupt Error: %d\n", err); 1864 "Unable to allocate interrupt Error: %d\n", err);
1878 if (adapter->have_msi)
1879 pci_disable_msi(pdev);
1880 return err; 1865 return err;
1881 } 1866 }
1882 netdev_dbg(netdev, "atl1e_request_irq OK\n"); 1867 netdev_dbg(netdev, "atl1e_request_irq OK\n");
@@ -2344,6 +2329,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	INIT_WORK(&adapter->reset_task, atl1e_reset_task);
 	INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
+	netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
 	err = register_netdev(netdev);
 	if (err) {
 		netdev_err(netdev, "register netdevice failed\n");
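The atl1e hunks drop the MSI path entirely and always request a shared legacy interrupt, which also removes the have_msi bookkeeping. A minimal sketch of the simplified request/free pair for a hypothetical PCI network driver:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

/* Without MSI the line may be shared, so IRQF_SHARED is required and the
 * dev_id cookie (here the netdev) must be unique per handler instance. */
static int example_request_irq(struct pci_dev *pdev, struct net_device *netdev,
			       irq_handler_t handler)
{
	return request_irq(pdev->irq, handler, IRQF_SHARED,
			   netdev->name, netdev);
}

static void example_free_irq(struct pci_dev *pdev, struct net_device *netdev)
{
	free_irq(pdev->irq, netdev);	/* no pci_disable_msi() counterpart */
}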
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 568205436a15..91ecd6a00d05 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -2139,12 +2139,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
 			break;
 		default:
 			BNX2X_ERR("Non valid capability ID\n");
-			rval = -EINVAL;
+			rval = 1;
 			break;
 		}
 	} else {
 		DP(BNX2X_MSG_DCB, "DCB disabled\n");
-		rval = -EINVAL;
+		rval = 1;
 	}
 
 	DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
@@ -2170,12 +2170,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
 			break;
 		default:
 			BNX2X_ERR("Non valid TC-ID\n");
-			rval = -EINVAL;
+			rval = 1;
 			break;
 		}
 	} else {
 		DP(BNX2X_MSG_DCB, "DCB disabled\n");
-		rval = -EINVAL;
+		rval = 1;
 	}
 
 	return rval;
@@ -2188,7 +2188,7 @@ static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
2188 return -EINVAL; 2188 return -EINVAL;
2189} 2189}
2190 2190
2191static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) 2191static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
2192{ 2192{
2193 struct bnx2x *bp = netdev_priv(netdev); 2193 struct bnx2x *bp = netdev_priv(netdev);
2194 DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); 2194 DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
@@ -2390,12 +2390,12 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
 			break;
 		default:
 			BNX2X_ERR("Non valid featrue-ID\n");
-			rval = -EINVAL;
+			rval = 1;
 			break;
 		}
 	} else {
 		DP(BNX2X_MSG_DCB, "DCB disabled\n");
-		rval = -EINVAL;
+		rval = 1;
 	}
 
 	return rval;
@@ -2431,12 +2431,12 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
 			break;
 		default:
 			BNX2X_ERR("Non valid featrue-ID\n");
-			rval = -EINVAL;
+			rval = 1;
 			break;
 		}
 	} else {
 		DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
-		rval = -EINVAL;
+		rval = 1;
 	}
 
 	return rval;
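All of the bnx2x_dcb.c hunks replace "rval = -EINVAL" with "rval = 1". The rval local in these dcbnl callbacks is an unsigned byte, so a negative errno would be silently truncated; returning a plain non-zero flag keeps the failure representable. The rationale is not quoted in this diff, so treat the following as an illustrative sketch of the pitfall rather than the driver's stated reasoning:

#include <linux/errno.h>
#include <linux/types.h>

static u8 get_cap_broken(int capid)
{
	u8 rval = 0;

	if (capid < 0)
		rval = -EINVAL;	/* truncates to 234: looks like success-ish garbage */
	return rval;
}

static u8 get_cap_fixed(int capid)
{
	u8 rval = 0;

	if (capid < 0)
		rval = 1;	/* non-zero simply means "failed" */
	return rval;
}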
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 77ebae0ac64a..0283f343b0d1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -13437,13 +13437,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 {
 	struct bnx2x *bp = params->bp;
 	u16 base_page, next_page, not_kr2_device, lane;
-	int sigdet = bnx2x_warpcore_get_sigdet(phy, params);
-
-	if (!sigdet) {
-		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
-			bnx2x_kr2_recovery(params, vars, phy);
-		return;
-	}
+	int sigdet;
 
 	/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
 	 * since some switches tend to reinit the AN process and clear the
@@ -13454,6 +13448,16 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 		vars->check_kr2_recovery_cnt--;
 		return;
 	}
+
+	sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+	if (!sigdet) {
+		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+			bnx2x_kr2_recovery(params, vars, phy);
+			DP(NETIF_MSG_LINK, "No sigdet\n");
+		}
+		return;
+	}
+
 	lane = bnx2x_get_warpcore_lane(phy, params);
 	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
 			  MDIO_AER_BLOCK_AER_REG, lane);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e81a747ea8ce..8e58da909f5c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
 				  q);
 	}
 
-	if (!NO_FCOE(bp)) {
+	if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
 		fp = &bp->fp[FCOE_IDX(bp)];
 		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 
@@ -13354,6 +13354,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
 	RCU_INIT_POINTER(bp->cnic_ops, NULL);
 	mutex_unlock(&bp->cnic_mutex);
 	synchronize_rcu();
+	bp->cnic_enabled = false;
 	kfree(bp->cnic_kwq);
 	bp->cnic_kwq = NULL;
 
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 67d2663b3974..17a972734ba7 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14604,8 +14604,11 @@ static void tg3_read_vpd(struct tg3 *tp)
 		if (j + len > block_end)
 			goto partno;
 
-		memcpy(tp->fw_ver, &vpd_data[j], len);
-		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
+		if (len >= sizeof(tp->fw_ver))
+			len = sizeof(tp->fw_ver) - 1;
+		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
+		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
+			 &vpd_data[j]);
 	}
 
 partno:
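The tg3_read_vpd() hunk swaps an unbounded memcpy()/strncat() pair for a clamped snprintf() with the "%.*s" precision specifier. A small user-space example of the same bounded-copy idiom (buffer size and input string are illustrative only):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char fw_ver[32];
	const char vpd_field[] = "example VPD firmware string, possibly quite long";
	int len = (int)strlen(vpd_field);

	/* snprintf() never writes past fw_ver; the explicit clamp mirrors
	 * the driver's check that the VPD field alone fits the buffer. */
	if (len >= (int)sizeof(fw_ver))
		len = (int)sizeof(fw_ver) - 1;
	snprintf(fw_ver, sizeof(fw_ver), "%.*s bc ", len, vpd_field);
	puts(fw_ver);
	return 0;
}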
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index a170065b5973..b0ebc9f6d55e 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -163,6 +163,7 @@
 #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
 
 /* XGMAC_INT_STAT reg */
+#define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */
 #define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
 #define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */
 
@@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev)
960 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); 961 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
961 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); 962 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
962 963
964 /* Mask power mgt interrupt */
965 writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
966
963 /* XGMAC requires AXI bus init. This is a 'magic number' for now */ 967 /* XGMAC requires AXI bus init. This is a 'magic number' for now */
964 writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); 968 writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
965 969
@@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
1141 struct sk_buff *skb; 1145 struct sk_buff *skb;
1142 int frame_len; 1146 int frame_len;
1143 1147
1148 if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
1149 break;
1150
1144 entry = priv->rx_tail; 1151 entry = priv->rx_tail;
1145 p = priv->dma_rx + entry; 1152 p = priv->dma_rx + entry;
1146 if (desc_get_owner(p)) 1153 if (desc_get_owner(p))
@@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
 	unsigned int pmt = 0;
 
 	if (mode & WAKE_MAGIC)
-		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
+		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
 	if (mode & WAKE_UCAST)
 		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
 
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8cdf02503d13..9eada8e86078 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
257 tmp = readl(reg); 257 tmp = readl(reg);
258} 258}
259 259
260/*
261 * Sleep, either by using msleep() or if we are suspending, then
262 * use mdelay() to sleep.
263 */
264static void dm9000_msleep(board_info_t *db, unsigned int ms)
265{
266 if (db->in_suspend)
267 mdelay(ms);
268 else
269 msleep(ms);
270}
271
272/* Read a word from phyxcer */
273static int
274dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
275{
276 board_info_t *db = netdev_priv(dev);
277 unsigned long flags;
278 unsigned int reg_save;
279 int ret;
280
281 mutex_lock(&db->addr_lock);
282
283 spin_lock_irqsave(&db->lock, flags);
284
285 /* Save previous register address */
286 reg_save = readb(db->io_addr);
287
288 /* Fill the phyxcer register into REG_0C */
289 iow(db, DM9000_EPAR, DM9000_PHY | reg);
290
291 /* Issue phyxcer read command */
292 iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
293
294 writeb(reg_save, db->io_addr);
295 spin_unlock_irqrestore(&db->lock, flags);
296
297 dm9000_msleep(db, 1); /* Wait read complete */
298
299 spin_lock_irqsave(&db->lock, flags);
300 reg_save = readb(db->io_addr);
301
302 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
303
304 /* The read data keeps on REG_0D & REG_0E */
305 ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
306
307 /* restore the previous address */
308 writeb(reg_save, db->io_addr);
309 spin_unlock_irqrestore(&db->lock, flags);
310
311 mutex_unlock(&db->addr_lock);
312
313 dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
314 return ret;
315}
316
317/* Write a word to phyxcer */
318static void
319dm9000_phy_write(struct net_device *dev,
320 int phyaddr_unused, int reg, int value)
321{
322 board_info_t *db = netdev_priv(dev);
323 unsigned long flags;
324 unsigned long reg_save;
325
326 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
327 mutex_lock(&db->addr_lock);
328
329 spin_lock_irqsave(&db->lock, flags);
330
331 /* Save previous register address */
332 reg_save = readb(db->io_addr);
333
334 /* Fill the phyxcer register into REG_0C */
335 iow(db, DM9000_EPAR, DM9000_PHY | reg);
336
337 /* Fill the written data into REG_0D & REG_0E */
338 iow(db, DM9000_EPDRL, value);
339 iow(db, DM9000_EPDRH, value >> 8);
340
341 /* Issue phyxcer write command */
342 iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
343
344 writeb(reg_save, db->io_addr);
345 spin_unlock_irqrestore(&db->lock, flags);
346
347 dm9000_msleep(db, 1); /* Wait write complete */
348
349 spin_lock_irqsave(&db->lock, flags);
350 reg_save = readb(db->io_addr);
351
352 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
353
354 /* restore the previous address */
355 writeb(reg_save, db->io_addr);
356
357 spin_unlock_irqrestore(&db->lock, flags);
358 mutex_unlock(&db->addr_lock);
359}
360
260/* dm9000_set_io 361/* dm9000_set_io
261 * 362 *
262 * select the specified set of io routines to use with the 363 * select the specified set of io routines to use with the
@@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev)
795 896
796 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 897 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
797 898
899 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
900 dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
901
798 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; 902 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
799 903
800 /* if wol is needed, then always set NCR_WAKEEN otherwise we end 904 /* if wol is needed, then always set NCR_WAKEEN otherwise we end
@@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev)
1201 return 0; 1305 return 0;
1202} 1306}
1203 1307
1204/*
1205 * Sleep, either by using msleep() or if we are suspending, then
1206 * use mdelay() to sleep.
1207 */
1208static void dm9000_msleep(board_info_t *db, unsigned int ms)
1209{
1210 if (db->in_suspend)
1211 mdelay(ms);
1212 else
1213 msleep(ms);
1214}
1215
1216/*
1217 * Read a word from phyxcer
1218 */
1219static int
1220dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
1221{
1222 board_info_t *db = netdev_priv(dev);
1223 unsigned long flags;
1224 unsigned int reg_save;
1225 int ret;
1226
1227 mutex_lock(&db->addr_lock);
1228
1229 spin_lock_irqsave(&db->lock,flags);
1230
1231 /* Save previous register address */
1232 reg_save = readb(db->io_addr);
1233
1234 /* Fill the phyxcer register into REG_0C */
1235 iow(db, DM9000_EPAR, DM9000_PHY | reg);
1236
1237 iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
1238
1239 writeb(reg_save, db->io_addr);
1240 spin_unlock_irqrestore(&db->lock,flags);
1241
1242 dm9000_msleep(db, 1); /* Wait read complete */
1243
1244 spin_lock_irqsave(&db->lock,flags);
1245 reg_save = readb(db->io_addr);
1246
1247 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
1248
1249 /* The read data keeps on REG_0D & REG_0E */
1250 ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
1251
1252 /* restore the previous address */
1253 writeb(reg_save, db->io_addr);
1254 spin_unlock_irqrestore(&db->lock,flags);
1255
1256 mutex_unlock(&db->addr_lock);
1257
1258 dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
1259 return ret;
1260}
1261
1262/*
1263 * Write a word to phyxcer
1264 */
1265static void
1266dm9000_phy_write(struct net_device *dev,
1267 int phyaddr_unused, int reg, int value)
1268{
1269 board_info_t *db = netdev_priv(dev);
1270 unsigned long flags;
1271 unsigned long reg_save;
1272
1273 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
1274 mutex_lock(&db->addr_lock);
1275
1276 spin_lock_irqsave(&db->lock,flags);
1277
1278 /* Save previous register address */
1279 reg_save = readb(db->io_addr);
1280
1281 /* Fill the phyxcer register into REG_0C */
1282 iow(db, DM9000_EPAR, DM9000_PHY | reg);
1283
1284 /* Fill the written data into REG_0D & REG_0E */
1285 iow(db, DM9000_EPDRL, value);
1286 iow(db, DM9000_EPDRH, value >> 8);
1287
1288 iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
1289
1290 writeb(reg_save, db->io_addr);
1291 spin_unlock_irqrestore(&db->lock, flags);
1292
1293 dm9000_msleep(db, 1); /* Wait write complete */
1294
1295 spin_lock_irqsave(&db->lock,flags);
1296 reg_save = readb(db->io_addr);
1297
1298 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
1299
1300 /* restore the previous address */
1301 writeb(reg_save, db->io_addr);
1302
1303 spin_unlock_irqrestore(&db->lock, flags);
1304 mutex_unlock(&db->addr_lock);
1305}
1306
1307static void 1308static void
1308dm9000_shutdown(struct net_device *dev) 1309dm9000_shutdown(struct net_device *dev)
1309{ 1310{
@@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev)
1502 db->flags |= DM9000_PLATF_SIMPLE_PHY; 1503 db->flags |= DM9000_PLATF_SIMPLE_PHY;
1503#endif 1504#endif
1504 1505
1505 dm9000_reset(db); 1506 /* Fixing bug on dm9000_probe, takeover dm9000_reset(db),
1507 * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo
1508 * while probe stage.
1509 */
1510
1511 iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
1506 1512
1507 /* try multiple times, DM9000 sometimes gets the read wrong */ 1513 /* try multiple times, DM9000 sometimes gets the read wrong */
1508 for (i = 0; i < 8; i++) { 1514 for (i = 0; i < 8; i++) {
diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h
index 55688bd1a3ef..9ce058adabab 100644
--- a/drivers/net/ethernet/davicom/dm9000.h
+++ b/drivers/net/ethernet/davicom/dm9000.h
@@ -69,7 +69,9 @@
 #define NCR_WAKEEN (1<<6)
 #define NCR_FCOL (1<<4)
 #define NCR_FDX (1<<3)
-#define NCR_LBK (3<<1)
+
+#define NCR_RESERVED (3<<1)
+#define NCR_MAC_LBK (1<<1)
 #define NCR_RST (1<<0)
 
 #define NSR_SPEED (1<<7)
@@ -167,5 +169,12 @@
 #define ISR_LNKCHNG (1<<5)
 #define ISR_UNDERRUN (1<<4)
 
+/* Davicom MII registers.
+ */
+
+#define MII_DM_DSPCR 0x1b /* DSP Control Register */
+
+#define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */
+
 #endif /* _DM9000X_H_ */
 
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index e3f39372ce25..f292c3aa423f 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -345,6 +345,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
345 return NETDEV_TX_OK; 345 return NETDEV_TX_OK;
346} 346}
347 347
348/* Init RX & TX buffer descriptors
349 */
350static void fec_enet_bd_init(struct net_device *dev)
351{
352 struct fec_enet_private *fep = netdev_priv(dev);
353 struct bufdesc *bdp;
354 unsigned int i;
355
356 /* Initialize the receive buffer descriptors. */
357 bdp = fep->rx_bd_base;
358 for (i = 0; i < RX_RING_SIZE; i++) {
359
360 /* Initialize the BD for every fragment in the page. */
361 if (bdp->cbd_bufaddr)
362 bdp->cbd_sc = BD_ENET_RX_EMPTY;
363 else
364 bdp->cbd_sc = 0;
365 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
366 }
367
368 /* Set the last buffer to wrap */
369 bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
370 bdp->cbd_sc |= BD_SC_WRAP;
371
372 fep->cur_rx = fep->rx_bd_base;
373
374 /* ...and the same for transmit */
375 bdp = fep->tx_bd_base;
376 fep->cur_tx = bdp;
377 for (i = 0; i < TX_RING_SIZE; i++) {
378
379 /* Initialize the BD for every fragment in the page. */
380 bdp->cbd_sc = 0;
381 if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
382 dev_kfree_skb_any(fep->tx_skbuff[i]);
383 fep->tx_skbuff[i] = NULL;
384 }
385 bdp->cbd_bufaddr = 0;
386 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
387 }
388
389 /* Set the last buffer to wrap */
390 bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
391 bdp->cbd_sc |= BD_SC_WRAP;
392 fep->dirty_tx = bdp;
393}
394
348/* This function is called to start or restart the FEC during a link 395/* This function is called to start or restart the FEC during a link
349 * change. This only happens when switching between half and full 396 * change. This only happens when switching between half and full
350 * duplex. 397 * duplex.
@@ -388,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex)
388 /* Set maximum receive buffer size. */ 435 /* Set maximum receive buffer size. */
389 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); 436 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
390 437
438 fec_enet_bd_init(ndev);
439
391 /* Set receive and transmit descriptor base. */ 440 /* Set receive and transmit descriptor base. */
392 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); 441 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
393 if (fep->bufdesc_ex) 442 if (fep->bufdesc_ex)
@@ -397,7 +446,6 @@ fec_restart(struct net_device *ndev, int duplex)
397 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) 446 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
398 * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); 447 * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
399 448
400 fep->cur_rx = fep->rx_bd_base;
401 449
402 for (i = 0; i <= TX_RING_MOD_MASK; i++) { 450 for (i = 0; i <= TX_RING_MOD_MASK; i++) {
403 if (fep->tx_skbuff[i]) { 451 if (fep->tx_skbuff[i]) {
@@ -1332,7 +1380,7 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 static void fec_enet_free_buffers(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	int i;
+	unsigned int i;
 	struct sk_buff *skb;
 	struct bufdesc *bdp;
 
@@ -1356,7 +1404,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 static int fec_enet_alloc_buffers(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	int i;
+	unsigned int i;
 	struct sk_buff *skb;
 	struct bufdesc *bdp;
 
@@ -1597,8 +1645,6 @@ static int fec_enet_init(struct net_device *ndev)
1597{ 1645{
1598 struct fec_enet_private *fep = netdev_priv(ndev); 1646 struct fec_enet_private *fep = netdev_priv(ndev);
1599 struct bufdesc *cbd_base; 1647 struct bufdesc *cbd_base;
1600 struct bufdesc *bdp;
1601 int i;
1602 1648
1603 /* Allocate memory for buffer descriptors. */ 1649 /* Allocate memory for buffer descriptors. */
1604 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, 1650 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
@@ -1608,6 +1654,7 @@ static int fec_enet_init(struct net_device *ndev)
1608 return -ENOMEM; 1654 return -ENOMEM;
1609 } 1655 }
1610 1656
1657 memset(cbd_base, 0, PAGE_SIZE);
1611 spin_lock_init(&fep->hw_lock); 1658 spin_lock_init(&fep->hw_lock);
1612 1659
1613 fep->netdev = ndev; 1660 fep->netdev = ndev;
@@ -1631,35 +1678,6 @@ static int fec_enet_init(struct net_device *ndev)
1631 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 1678 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
1632 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); 1679 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
1633 1680
1634 /* Initialize the receive buffer descriptors. */
1635 bdp = fep->rx_bd_base;
1636 for (i = 0; i < RX_RING_SIZE; i++) {
1637
1638 /* Initialize the BD for every fragment in the page. */
1639 bdp->cbd_sc = 0;
1640 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
1641 }
1642
1643 /* Set the last buffer to wrap */
1644 bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
1645 bdp->cbd_sc |= BD_SC_WRAP;
1646
1647 /* ...and the same for transmit */
1648 bdp = fep->tx_bd_base;
1649 fep->cur_tx = bdp;
1650 for (i = 0; i < TX_RING_SIZE; i++) {
1651
1652 /* Initialize the BD for every fragment in the page. */
1653 bdp->cbd_sc = 0;
1654 bdp->cbd_bufaddr = 0;
1655 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
1656 }
1657
1658 /* Set the last buffer to wrap */
1659 bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
1660 bdp->cbd_sc |= BD_SC_WRAP;
1661 fep->dirty_tx = bdp;
1662
1663 fec_restart(ndev, 0); 1681 fec_restart(ndev, 0);
1664 1682
1665 return 0; 1683 return 0;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 1f17ca0f2201..0d8df400a479 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -128,6 +128,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
 
 	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 }
+EXPORT_SYMBOL(fec_ptp_start_cyclecounter);
 
 /**
  * fec_ptp_adjfreq - adjust ptp cycle frequency
@@ -318,6 +319,7 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
 	    -EFAULT : 0;
 }
+EXPORT_SYMBOL(fec_ptp_ioctl);
 
 /**
  * fec_time_keep - call timecounter_read every second to avoid timer overrun
@@ -383,3 +385,4 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
 		pr_info("registered PHC device on %s\n", ndev->name);
 	}
 }
+EXPORT_SYMBOL(fec_ptp_init);
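The fec_ptp.c hunks only add EXPORT_SYMBOL() markers, which is what allows these functions to be called from a separately built module. A generic sketch of the mechanism (module and symbol names are invented):

#include <linux/module.h>

/* provider.c: make a helper callable from other modules */
int example_helper(int x)
{
	return x * 2;
}
EXPORT_SYMBOL(example_helper);

MODULE_LICENSE("GPL");

/* Any other module can now declare `int example_helper(int);` (normally via
 * a shared header) and the symbol is resolved when that module is loaded. */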
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ec800b093e7e..d2bea3f07c73 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -870,7 +870,7 @@ err_unlock:
 }
 
 static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
-			void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+			int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
 {
 	struct cb *cb;
 	unsigned long flags;
@@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
888 nic->cbs_avail--; 888 nic->cbs_avail--;
889 cb->skb = skb; 889 cb->skb = skb;
890 890
891 err = cb_prepare(nic, cb, skb);
892 if (err)
893 goto err_unlock;
894
891 if (unlikely(!nic->cbs_avail)) 895 if (unlikely(!nic->cbs_avail))
892 err = -ENOSPC; 896 err = -ENOSPC;
893 897
894 cb_prepare(nic, cb, skb);
895 898
896 /* Order is important otherwise we'll be in a race with h/w: 899 /* Order is important otherwise we'll be in a race with h/w:
897 * set S-bit in current first, then clear S-bit in previous. */ 900 * set S-bit in current first, then clear S-bit in previous. */
@@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic)
1091 nic->mii.mdio_write = mdio_write; 1094 nic->mii.mdio_write = mdio_write;
1092} 1095}
1093 1096
1094static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1097static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1095{ 1098{
1096 struct config *config = &cb->u.config; 1099 struct config *config = &cb->u.config;
1097 u8 *c = (u8 *)config; 1100 u8 *c = (u8 *)config;
@@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1181 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, 1184 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1182 "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", 1185 "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1183 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); 1186 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
1187 return 0;
1184} 1188}
1185 1189
1186/************************************************************************* 1190/*************************************************************************
@@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
1331 return fw; 1335 return fw;
1332} 1336}
1333 1337
1334static void e100_setup_ucode(struct nic *nic, struct cb *cb, 1338static int e100_setup_ucode(struct nic *nic, struct cb *cb,
1335 struct sk_buff *skb) 1339 struct sk_buff *skb)
1336{ 1340{
1337 const struct firmware *fw = (void *)skb; 1341 const struct firmware *fw = (void *)skb;
@@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb,
1358 cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); 1362 cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
1359 1363
1360 cb->command = cpu_to_le16(cb_ucode | cb_el); 1364 cb->command = cpu_to_le16(cb_ucode | cb_el);
1365 return 0;
1361} 1366}
1362 1367
1363static inline int e100_load_ucode_wait(struct nic *nic) 1368static inline int e100_load_ucode_wait(struct nic *nic)
@@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic)
1400 return err; 1405 return err;
1401} 1406}
1402 1407
1403static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, 1408static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1404 struct sk_buff *skb) 1409 struct sk_buff *skb)
1405{ 1410{
1406 cb->command = cpu_to_le16(cb_iaaddr); 1411 cb->command = cpu_to_le16(cb_iaaddr);
1407 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); 1412 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1413 return 0;
1408} 1414}
1409 1415
1410static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1416static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1411{ 1417{
1412 cb->command = cpu_to_le16(cb_dump); 1418 cb->command = cpu_to_le16(cb_dump);
1413 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + 1419 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1414 offsetof(struct mem, dump_buf)); 1420 offsetof(struct mem, dump_buf));
1421 return 0;
1415} 1422}
1416 1423
1417static int e100_phy_check_without_mii(struct nic *nic) 1424static int e100_phy_check_without_mii(struct nic *nic)
@@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic)
1581 return 0; 1588 return 0;
1582} 1589}
1583 1590
1584static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1591static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1585{ 1592{
1586 struct net_device *netdev = nic->netdev; 1593 struct net_device *netdev = nic->netdev;
1587 struct netdev_hw_addr *ha; 1594 struct netdev_hw_addr *ha;
@@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1596 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, 1603 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
1597 ETH_ALEN); 1604 ETH_ALEN);
1598 } 1605 }
1606 return 0;
1599} 1607}
1600 1608
1601static void e100_set_multicast_list(struct net_device *netdev) 1609static void e100_set_multicast_list(struct net_device *netdev)
@@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data)
1756 round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); 1764 round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
1757} 1765}
1758 1766
1759static void e100_xmit_prepare(struct nic *nic, struct cb *cb, 1767static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
1760 struct sk_buff *skb) 1768 struct sk_buff *skb)
1761{ 1769{
1770 dma_addr_t dma_addr;
1762 cb->command = nic->tx_command; 1771 cb->command = nic->tx_command;
1763 1772
1773 dma_addr = pci_map_single(nic->pdev,
1774 skb->data, skb->len, PCI_DMA_TODEVICE);
1775 /* If we can't map the skb, have the upper layer try later */
1776 if (pci_dma_mapping_error(nic->pdev, dma_addr))
1777 return -ENOMEM;
1778
1764 /* 1779 /*
1765 * Use the last 4 bytes of the SKB payload packet as the CRC, used for 1780 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
1766 * testing, ie sending frames with bad CRC. 1781 * testing, ie sending frames with bad CRC.
@@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1777 cb->u.tcb.tcb_byte_count = 0; 1792 cb->u.tcb.tcb_byte_count = 0;
1778 cb->u.tcb.threshold = nic->tx_threshold; 1793 cb->u.tcb.threshold = nic->tx_threshold;
1779 cb->u.tcb.tbd_count = 1; 1794 cb->u.tcb.tbd_count = 1;
1780 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, 1795 cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
1781 skb->data, skb->len, PCI_DMA_TODEVICE));
1782 /* check for mapping failure? */
1783 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); 1796 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1784 skb_tx_timestamp(skb); 1797 skb_tx_timestamp(skb);
1798 return 0;
1785} 1799}
1786 1800
1787static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, 1801static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
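The e100 hunks change the cb_prepare callbacks from void to int so a failed DMA mapping can be detected (via pci_dma_mapping_error() in this driver) and the command skipped instead of handing the hardware a bad bus address. A hedged sketch of the same check using the present-day dma_map_single()/dma_mapping_error() pair:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Sketch: map an skb for CPU->device DMA and fail cleanly if no mapping
 * could be created (IOMMU exhaustion, bounce-buffer pressure, ...). */
static int example_map_tx(struct device *dev, struct sk_buff *skb,
			  dma_addr_t *dma)
{
	*dma = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;	/* caller can retry the packet later */
	return 0;
}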
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 43462d596a4e..ffd287196bf8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1053,6 +1053,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1053 txdr->buffer_info[i].dma = 1053 txdr->buffer_info[i].dma =
1054 dma_map_single(&pdev->dev, skb->data, skb->len, 1054 dma_map_single(&pdev->dev, skb->data, skb->len,
1055 DMA_TO_DEVICE); 1055 DMA_TO_DEVICE);
1056 if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) {
1057 ret_val = 4;
1058 goto err_nomem;
1059 }
1056 tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); 1060 tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
1057 tx_desc->lower.data = cpu_to_le32(skb->len); 1061 tx_desc->lower.data = cpu_to_le32(skb->len);
1058 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | 1062 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1069,7 +1073,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
 				    GFP_KERNEL);
 	if (!rxdr->buffer_info) {
-		ret_val = 4;
+		ret_val = 5;
 		goto err_nomem;
 	}
 
@@ -1077,7 +1081,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
 					GFP_KERNEL);
 	if (!rxdr->desc) {
-		ret_val = 5;
+		ret_val = 6;
 		goto err_nomem;
 	}
 	memset(rxdr->desc, 0, rxdr->size);
@@ -1101,7 +1105,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
 		skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
 		if (!skb) {
-			ret_val = 6;
+			ret_val = 7;
 			goto err_nomem;
 		}
 		skb_reserve(skb, NET_IP_ALIGN);
@@ -1110,6 +1114,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1110 rxdr->buffer_info[i].dma = 1114 rxdr->buffer_info[i].dma =
1111 dma_map_single(&pdev->dev, skb->data, 1115 dma_map_single(&pdev->dev, skb->data,
1112 E1000_RXBUFFER_2048, DMA_FROM_DEVICE); 1116 E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
1117 if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
1118 ret_val = 8;
1119 goto err_nomem;
1120 }
1113 rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); 1121 rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
1114 memset(skb->data, 0x00, skb->len); 1122 memset(skb->data, 0x00, skb->len);
1115 } 1123 }
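
The e100 and e1000 hunks above share one theme: every streaming DMA mapping is now checked with dma_mapping_error() before its address is written into a hardware descriptor, and the caller bails out (or counts a failure) instead of handing the device a bogus address. A minimal sketch of that pattern follows; the helper name and the bare -ENOMEM return are illustrative, not the drivers' actual code.

/* Sketch only: generic map-then-check pattern, not e100/e1000 itself. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int map_tx_skb(struct device *dev, struct sk_buff *skb,
		      dma_addr_t *dma_out)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;		/* nothing was mapped; caller unwinds */

	*dma_out = dma;			/* only now safe to place in a descriptor */
	return 0;
}

Only addresses that pass the check ever need dma_unmap_single(); a failed mapping is simply never used.
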
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 948b86ffa4f0..7e615e2bf7e6 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -848,11 +848,16 @@ check_page:
848 } 848 }
849 } 849 }
850 850
851 if (!buffer_info->dma) 851 if (!buffer_info->dma) {
852 buffer_info->dma = dma_map_page(&pdev->dev, 852 buffer_info->dma = dma_map_page(&pdev->dev,
853 buffer_info->page, 0, 853 buffer_info->page, 0,
854 PAGE_SIZE, 854 PAGE_SIZE,
855 DMA_FROM_DEVICE); 855 DMA_FROM_DEVICE);
856 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
857 adapter->alloc_rx_buff_failed++;
858 break;
859 }
860 }
856 861
857 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 862 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
858 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 863 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index b64542acfa34..12b1d8480808 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1818,27 +1818,32 @@ out:
1818 **/ 1818 **/
1819void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) 1819void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
1820{ 1820{
1821 u32 dtxswc; 1821 u32 reg_val, reg_offset;
1822 1822
1823 switch (hw->mac.type) { 1823 switch (hw->mac.type) {
1824 case e1000_82576: 1824 case e1000_82576:
1825 reg_offset = E1000_DTXSWC;
1826 break;
1825 case e1000_i350: 1827 case e1000_i350:
1826 dtxswc = rd32(E1000_DTXSWC); 1828 reg_offset = E1000_TXSWC;
1827 if (enable) {
1828 dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
1829 E1000_DTXSWC_VLAN_SPOOF_MASK);
1830 /* The PF can spoof - it has to in order to
1831 * support emulation mode NICs */
1832 dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
1833 } else {
1834 dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
1835 E1000_DTXSWC_VLAN_SPOOF_MASK);
1836 }
1837 wr32(E1000_DTXSWC, dtxswc);
1838 break; 1829 break;
1839 default: 1830 default:
1840 break; 1831 return;
1832 }
1833
1834 reg_val = rd32(reg_offset);
1835 if (enable) {
1836 reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
1837 E1000_DTXSWC_VLAN_SPOOF_MASK);
1838 /* The PF can spoof - it has to in order to
1839 * support emulation mode NICs
1840 */
1841 reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
1842 } else {
1843 reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
1844 E1000_DTXSWC_VLAN_SPOOF_MASK);
1841 } 1845 }
1846 wr32(reg_offset, reg_val);
1842} 1847}
1843 1848
1844/** 1849/**
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 4623502054d5..0478a1abe541 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -39,7 +39,7 @@
39#include <linux/pci.h> 39#include <linux/pci.h>
40 40
41#ifdef CONFIG_IGB_HWMON 41#ifdef CONFIG_IGB_HWMON
42struct i2c_board_info i350_sensor_info = { 42static struct i2c_board_info i350_sensor_info = {
43 I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), 43 I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
44}; 44};
45 45
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 4dbd62968c7a..8496adfc6a68 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2542,8 +2542,8 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
2542 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) 2542 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
2543 return; 2543 return;
2544 2544
2545 igb_enable_sriov(pdev, max_vfs);
2546 pci_sriov_set_totalvfs(pdev, 7); 2545 pci_sriov_set_totalvfs(pdev, 7);
2546 igb_enable_sriov(pdev, max_vfs);
2547 2547
2548#endif /* CONFIG_PCI_IOV */ 2548#endif /* CONFIG_PCI_IOV */
2549} 2549}
@@ -2652,7 +2652,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
2652 if (max_vfs > 7) { 2652 if (max_vfs > 7) {
2653 dev_warn(&pdev->dev, 2653 dev_warn(&pdev->dev,
2654 "Maximum of 7 VFs per PF, using max\n"); 2654 "Maximum of 7 VFs per PF, using max\n");
2655 adapter->vfs_allocated_count = 7; 2655 max_vfs = adapter->vfs_allocated_count = 7;
2656 } else 2656 } else
2657 adapter->vfs_allocated_count = max_vfs; 2657 adapter->vfs_allocated_count = max_vfs;
2658 if (adapter->vfs_allocated_count) 2658 if (adapter->vfs_allocated_count)
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 0987822359f0..0a237507ee85 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -740,7 +740,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
740 case e1000_82576: 740 case e1000_82576:
741 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); 741 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
742 adapter->ptp_caps.owner = THIS_MODULE; 742 adapter->ptp_caps.owner = THIS_MODULE;
743 adapter->ptp_caps.max_adj = 1000000000; 743 adapter->ptp_caps.max_adj = 999999881;
744 adapter->ptp_caps.n_ext_ts = 0; 744 adapter->ptp_caps.n_ext_ts = 0;
745 adapter->ptp_caps.pps = 0; 745 adapter->ptp_caps.pps = 0;
746 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; 746 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index ea4808373435..b5f94abe3cff 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -2159,6 +2159,10 @@ map_skb:
2159 skb->data, 2159 skb->data,
2160 adapter->rx_buffer_len, 2160 adapter->rx_buffer_len,
2161 DMA_FROM_DEVICE); 2161 DMA_FROM_DEVICE);
2162 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
2163 adapter->alloc_rx_buff_failed++;
2164 break;
2165 }
2162 2166
2163 rx_desc = IXGB_RX_DESC(*rx_ring, i); 2167 rx_desc = IXGB_RX_DESC(*rx_ring, i);
2164 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); 2168 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2168,7 +2172,8 @@ map_skb:
2168 rx_desc->status = 0; 2172 rx_desc->status = 0;
2169 2173
2170 2174
2171 if (++i == rx_ring->count) i = 0; 2175 if (++i == rx_ring->count)
2176 i = 0;
2172 buffer_info = &rx_ring->buffer_info[i]; 2177 buffer_info = &rx_ring->buffer_info[i];
2173 } 2178 }
2174 2179
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index db5611ae407e..79f4a26ea6cc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7922,12 +7922,19 @@ static int __init ixgbe_init_module(void)
7922 ixgbe_dbg_init(); 7922 ixgbe_dbg_init();
7923#endif /* CONFIG_DEBUG_FS */ 7923#endif /* CONFIG_DEBUG_FS */
7924 7924
7925 ret = pci_register_driver(&ixgbe_driver);
7926 if (ret) {
7927#ifdef CONFIG_DEBUG_FS
7928 ixgbe_dbg_exit();
7929#endif /* CONFIG_DEBUG_FS */
7930 return ret;
7931 }
7932
7925#ifdef CONFIG_IXGBE_DCA 7933#ifdef CONFIG_IXGBE_DCA
7926 dca_register_notify(&dca_notifier); 7934 dca_register_notify(&dca_notifier);
7927#endif 7935#endif
7928 7936
7929 ret = pci_register_driver(&ixgbe_driver); 7937 return 0;
7930 return ret;
7931} 7938}
7932 7939
7933module_init(ixgbe_init_module); 7940module_init(ixgbe_init_module);
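
The ixgbe hunk above registers the PCI driver before touching the DCA notifier and, if registration fails, tears down the debugfs directory created earlier, so module init no longer reports failure while leaving state behind. A rough sketch of that init-with-rollback shape, using placeholder names (my_pci_driver, my_dbg_root) rather than the ixgbe symbols:

/* Sketch only: placeholder driver and debugfs names. */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/debugfs.h>

static struct pci_driver my_pci_driver;		/* assumed filled in elsewhere */
static struct dentry *my_dbg_root;

static int __init my_module_init(void)
{
	int ret;

	my_dbg_root = debugfs_create_dir("my_drv", NULL);

	ret = pci_register_driver(&my_pci_driver);
	if (ret) {
		/* undo every step taken before the one that failed */
		debugfs_remove_recursive(my_dbg_root);
		return ret;
	}

	/* later, optional steps go here; their failure must also unwind */
	return 0;
}
module_init(my_module_init);
MODULE_LICENSE("GPL");

Each setup step only has to undo the steps that succeeded before it, which is why the registration call moved ahead of the optional DCA hook.
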
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index c3db6cd69b68..2b6cb5ca48ee 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -944,9 +944,17 @@ free_queue_irqs:
944 free_irq(adapter->msix_entries[vector].vector, 944 free_irq(adapter->msix_entries[vector].vector,
945 adapter->q_vector[vector]); 945 adapter->q_vector[vector]);
946 } 946 }
947 pci_disable_msix(adapter->pdev); 947 /* This failure is non-recoverable - it indicates the system is
948 kfree(adapter->msix_entries); 948 * out of MSIX vector resources and the VF driver cannot run
949 adapter->msix_entries = NULL; 949 * without them. Set the number of msix vectors to zero
950 * indicating that not enough can be allocated. The error
951 * will be returned to the user indicating device open failed.
952 * Any further attempts to force the driver to open will also
953 * fail. The only way to recover is to unload the driver and
954 * reload it again. If the system has recovered some MSIX
955 * vectors then it may succeed.
956 */
957 adapter->num_msix_vectors = 0;
950 return err; 958 return err;
951} 959}
952 960
@@ -2572,6 +2580,15 @@ static int ixgbevf_open(struct net_device *netdev)
2572 struct ixgbe_hw *hw = &adapter->hw; 2580 struct ixgbe_hw *hw = &adapter->hw;
2573 int err; 2581 int err;
2574 2582
2583 /* A previous failure to open the device because of a lack of
2584 * available MSIX vector resources may have reset the number
2585 * of msix vectors variable to zero. The only way to recover
2586 * is to unload/reload the driver and hope that the system has
2587 * been able to recover some MSIX vector resources.
2588 */
2589 if (!adapter->num_msix_vectors)
2590 return -ENOMEM;
2591
2575 /* disallow open during test */ 2592 /* disallow open during test */
2576 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 2593 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2577 return -EBUSY; 2594 return -EBUSY;
@@ -2628,7 +2645,6 @@ static int ixgbevf_open(struct net_device *netdev)
2628 2645
2629err_req_irq: 2646err_req_irq:
2630 ixgbevf_down(adapter); 2647 ixgbevf_down(adapter);
2631 ixgbevf_free_irq(adapter);
2632err_setup_rx: 2648err_setup_rx:
2633 ixgbevf_free_all_rx_resources(adapter); 2649 ixgbevf_free_all_rx_resources(adapter);
2634err_setup_tx: 2650err_setup_tx:
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6a2127489af7..bfdb06860397 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -769,7 +769,7 @@ ltq_etop_probe(struct platform_device *pdev)
769 return 0; 769 return 0;
770 770
771err_free: 771err_free:
772 kfree(dev); 772 free_netdev(dev);
773err_out: 773err_out:
774 return err; 774 return err;
775} 775}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index cd345b8969bc..1e628ce57201 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2771,16 +2771,17 @@ static int mvneta_probe(struct platform_device *pdev)
2771 2771
2772 netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); 2772 netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
2773 2773
2774 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2775 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2776 dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2777 dev->priv_flags |= IFF_UNICAST_FLT;
2778
2774 err = register_netdev(dev); 2779 err = register_netdev(dev);
2775 if (err < 0) { 2780 if (err < 0) {
2776 dev_err(&pdev->dev, "failed to register\n"); 2781 dev_err(&pdev->dev, "failed to register\n");
2777 goto err_deinit; 2782 goto err_deinit;
2778 } 2783 }
2779 2784
2780 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2781 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2782 dev->priv_flags |= IFF_UNICAST_FLT;
2783
2784 netdev_info(dev, "mac: %pM\n", dev->dev_addr); 2785 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
2785 2786
2786 platform_set_drvdata(pdev, pp->dev); 2787 platform_set_drvdata(pdev, pp->dev);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index fc07ca35721b..6a0e671fcecd 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
1067 sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); 1067 sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
1068 sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); 1068 sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
1069 1069
1070 tp = space - 2048/8; 1070 tp = space - 8192/8;
1071 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); 1071 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
1072 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); 1072 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
1073 } else { 1073 } else {
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index 615ac63ea860..ec6dcd80152b 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2074,7 +2074,7 @@ enum {
2074 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ 2074 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
2075 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ 2075 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
2076 2076
2077#define GMAC_DEF_MSK GM_IS_TX_FF_UR 2077#define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
2078}; 2078};
2079 2079
2080/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ 2080/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 995d4b6d5c1e..30d78f806dc3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -411,8 +411,8 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
411 411
412static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) 412static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
413{ 413{
414 unsigned int i; 414 int i;
415 for (i = ETH_ALEN - 1; i; --i) { 415 for (i = ETH_ALEN - 1; i >= 0; --i) {
416 dst_mac[i] = src_mac & 0xff; 416 dst_mac[i] = src_mac & 0xff;
417 src_mac >>= 8; 417 src_mac >>= 8;
418 } 418 }
@@ -1637,6 +1637,17 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
1637 /* Flush multicast filter */ 1637 /* Flush multicast filter */
1638 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); 1638 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1639 1639
1640 /* Remove flow steering rules for the port*/
1641 if (mdev->dev->caps.steering_mode ==
1642 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1643 ASSERT_RTNL();
1644 list_for_each_entry_safe(flow, tmp_flow,
1645 &priv->ethtool_list, list) {
1646 mlx4_flow_detach(mdev->dev, flow->id);
1647 list_del(&flow->list);
1648 }
1649 }
1650
1640 mlx4_en_destroy_drop_qp(priv); 1651 mlx4_en_destroy_drop_qp(priv);
1641 1652
1642 /* Free TX Rings */ 1653 /* Free TX Rings */
@@ -1657,17 +1668,6 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
1657 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN)) 1668 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
1658 mdev->mac_removed[priv->port] = 1; 1669 mdev->mac_removed[priv->port] = 1;
1659 1670
1660 /* Remove flow steering rules for the port*/
1661 if (mdev->dev->caps.steering_mode ==
1662 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1663 ASSERT_RTNL();
1664 list_for_each_entry_safe(flow, tmp_flow,
1665 &priv->ethtool_list, list) {
1666 mlx4_flow_detach(mdev->dev, flow->id);
1667 list_del(&flow->list);
1668 }
1669 }
1670
1671 /* Free RX Rings */ 1671 /* Free RX Rings */
1672 for (i = 0; i < priv->rx_ring_num; i++) { 1672 for (i = 0; i < priv->rx_ring_num; i++) {
1673 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); 1673 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 251ae2f93116..8e3123a1df88 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -771,7 +771,7 @@ int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
771 struct mlx4_slave_event_eq_info *event_eq = 771 struct mlx4_slave_event_eq_info *event_eq =
772 priv->mfunc.master.slave_state[slave].event_eq; 772 priv->mfunc.master.slave_state[slave].event_eq;
773 u32 in_modifier = vhcr->in_modifier; 773 u32 in_modifier = vhcr->in_modifier;
774 u32 eqn = in_modifier & 0x1FF; 774 u32 eqn = in_modifier & 0x3FF;
775 u64 in_param = vhcr->in_param; 775 u64 in_param = vhcr->in_param;
776 int err = 0; 776 int err = 0;
777 int i; 777 int i;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 2995687f1aee..1391b52f443a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -99,6 +99,7 @@ struct res_qp {
99 struct list_head mcg_list; 99 struct list_head mcg_list;
100 spinlock_t mcg_spl; 100 spinlock_t mcg_spl;
101 int local_qpn; 101 int local_qpn;
102 atomic_t ref_count;
102}; 103};
103 104
104enum res_mtt_states { 105enum res_mtt_states {
@@ -197,6 +198,7 @@ enum res_fs_rule_states {
197 198
198struct res_fs_rule { 199struct res_fs_rule {
199 struct res_common com; 200 struct res_common com;
201 int qpn;
200}; 202};
201 203
202static void *res_tracker_lookup(struct rb_root *root, u64 res_id) 204static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
@@ -355,7 +357,7 @@ static int mpt_mask(struct mlx4_dev *dev)
355 return dev->caps.num_mpts - 1; 357 return dev->caps.num_mpts - 1;
356} 358}
357 359
358static void *find_res(struct mlx4_dev *dev, int res_id, 360static void *find_res(struct mlx4_dev *dev, u64 res_id,
359 enum mlx4_resource type) 361 enum mlx4_resource type)
360{ 362{
361 struct mlx4_priv *priv = mlx4_priv(dev); 363 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -447,6 +449,7 @@ static struct res_common *alloc_qp_tr(int id)
447 ret->local_qpn = id; 449 ret->local_qpn = id;
448 INIT_LIST_HEAD(&ret->mcg_list); 450 INIT_LIST_HEAD(&ret->mcg_list);
449 spin_lock_init(&ret->mcg_spl); 451 spin_lock_init(&ret->mcg_spl);
452 atomic_set(&ret->ref_count, 0);
450 453
451 return &ret->com; 454 return &ret->com;
452} 455}
@@ -554,7 +557,7 @@ static struct res_common *alloc_xrcdn_tr(int id)
554 return &ret->com; 557 return &ret->com;
555} 558}
556 559
557static struct res_common *alloc_fs_rule_tr(u64 id) 560static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
558{ 561{
559 struct res_fs_rule *ret; 562 struct res_fs_rule *ret;
560 563
@@ -564,7 +567,7 @@ static struct res_common *alloc_fs_rule_tr(u64 id)
564 567
565 ret->com.res_id = id; 568 ret->com.res_id = id;
566 ret->com.state = RES_FS_RULE_ALLOCATED; 569 ret->com.state = RES_FS_RULE_ALLOCATED;
567 570 ret->qpn = qpn;
568 return &ret->com; 571 return &ret->com;
569} 572}
570 573
@@ -602,7 +605,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
602 ret = alloc_xrcdn_tr(id); 605 ret = alloc_xrcdn_tr(id);
603 break; 606 break;
604 case RES_FS_RULE: 607 case RES_FS_RULE:
605 ret = alloc_fs_rule_tr(id); 608 ret = alloc_fs_rule_tr(id, extra);
606 break; 609 break;
607 default: 610 default:
608 return NULL; 611 return NULL;
@@ -671,10 +674,14 @@ undo:
671 674
672static int remove_qp_ok(struct res_qp *res) 675static int remove_qp_ok(struct res_qp *res)
673{ 676{
674 if (res->com.state == RES_QP_BUSY) 677 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
678 !list_empty(&res->mcg_list)) {
679 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
680 res->com.state, atomic_read(&res->ref_count));
675 return -EBUSY; 681 return -EBUSY;
676 else if (res->com.state != RES_QP_RESERVED) 682 } else if (res->com.state != RES_QP_RESERVED) {
677 return -EPERM; 683 return -EPERM;
684 }
678 685
679 return 0; 686 return 0;
680} 687}
@@ -3124,6 +3131,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3124 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; 3131 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3125 int err; 3132 int err;
3126 int qpn; 3133 int qpn;
3134 struct res_qp *rqp;
3127 struct mlx4_net_trans_rule_hw_ctrl *ctrl; 3135 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3128 struct _rule_hw *rule_header; 3136 struct _rule_hw *rule_header;
3129 int header_id; 3137 int header_id;
@@ -3134,7 +3142,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3134 3142
3135 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; 3143 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3136 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; 3144 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3137 err = get_res(dev, slave, qpn, RES_QP, NULL); 3145 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3138 if (err) { 3146 if (err) {
3139 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); 3147 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3140 return err; 3148 return err;
@@ -3175,14 +3183,16 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3175 if (err) 3183 if (err)
3176 goto err_put; 3184 goto err_put;
3177 3185
3178 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0); 3186 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3179 if (err) { 3187 if (err) {
3180 mlx4_err(dev, "Fail to add flow steering resources.\n "); 3188 mlx4_err(dev, "Fail to add flow steering resources.\n ");
3181 /* detach rule*/ 3189 /* detach rule*/
3182 mlx4_cmd(dev, vhcr->out_param, 0, 0, 3190 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3183 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 3191 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3184 MLX4_CMD_NATIVE); 3192 MLX4_CMD_NATIVE);
3193 goto err_put;
3185 } 3194 }
3195 atomic_inc(&rqp->ref_count);
3186err_put: 3196err_put:
3187 put_res(dev, slave, qpn, RES_QP); 3197 put_res(dev, slave, qpn, RES_QP);
3188 return err; 3198 return err;
@@ -3195,20 +3205,35 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3195 struct mlx4_cmd_info *cmd) 3205 struct mlx4_cmd_info *cmd)
3196{ 3206{
3197 int err; 3207 int err;
3208 struct res_qp *rqp;
3209 struct res_fs_rule *rrule;
3198 3210
3199 if (dev->caps.steering_mode != 3211 if (dev->caps.steering_mode !=
3200 MLX4_STEERING_MODE_DEVICE_MANAGED) 3212 MLX4_STEERING_MODE_DEVICE_MANAGED)
3201 return -EOPNOTSUPP; 3213 return -EOPNOTSUPP;
3202 3214
3215 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3216 if (err)
3217 return err;
3218 /* Release the rule form busy state before removal */
3219 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3220 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3221 if (err)
3222 return err;
3223
3203 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); 3224 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3204 if (err) { 3225 if (err) {
3205 mlx4_err(dev, "Fail to remove flow steering resources.\n "); 3226 mlx4_err(dev, "Fail to remove flow steering resources.\n ");
3206 return err; 3227 goto out;
3207 } 3228 }
3208 3229
3209 err = mlx4_cmd(dev, vhcr->in_param, 0, 0, 3230 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3210 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 3231 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3211 MLX4_CMD_NATIVE); 3232 MLX4_CMD_NATIVE);
3233 if (!err)
3234 atomic_dec(&rqp->ref_count);
3235out:
3236 put_res(dev, slave, rrule->qpn, RES_QP);
3212 return err; 3237 return err;
3213} 3238}
3214 3239
@@ -3806,6 +3831,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3806 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 3831 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3807 /*VLAN*/ 3832 /*VLAN*/
3808 rem_slave_macs(dev, slave); 3833 rem_slave_macs(dev, slave);
3834 rem_slave_fs_rule(dev, slave);
3809 rem_slave_qps(dev, slave); 3835 rem_slave_qps(dev, slave);
3810 rem_slave_srqs(dev, slave); 3836 rem_slave_srqs(dev, slave);
3811 rem_slave_cqs(dev, slave); 3837 rem_slave_cqs(dev, slave);
@@ -3814,6 +3840,5 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3814 rem_slave_mtts(dev, slave); 3840 rem_slave_mtts(dev, slave);
3815 rem_slave_counters(dev, slave); 3841 rem_slave_counters(dev, slave);
3816 rem_slave_xrcdns(dev, slave); 3842 rem_slave_xrcdns(dev, slave);
3817 rem_slave_fs_rule(dev, slave);
3818 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 3843 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3819} 3844}
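
The resource-tracker hunks above tie flow-steering rules to their QPs: each rule stores its qpn, attaching a rule takes a reference on the QP (ref_count), detaching drops it, remove_qp_ok() refuses to free a QP that is still referenced or still on a multicast list, and slave cleanup now removes rules before QPs. A stripped-down sketch of that pin/unpin idea, with invented types (tracked_qp, tracked_rule) rather than the mlx4 structures:

/* Sketch only: invented types; the real logic lives in resource_tracker.c. */
#include <linux/atomic.h>
#include <linux/errno.h>

struct tracked_qp {
	atomic_t ref_count;		/* rules currently pointing at this QP */
};

struct tracked_rule {
	struct tracked_qp *qp;
};

static void rule_attach(struct tracked_rule *rule, struct tracked_qp *qp)
{
	rule->qp = qp;
	atomic_inc(&qp->ref_count);	/* pin the QP while the rule exists */
}

static void rule_detach(struct tracked_rule *rule)
{
	atomic_dec(&rule->qp->ref_count);
	rule->qp = NULL;
}

static int qp_remove_ok(struct tracked_qp *qp)
{
	/* mirrors remove_qp_ok(): busy while any rule still references it */
	return atomic_read(&qp->ref_count) ? -EBUSY : 0;
}

Ordering matters for the same reason: rules are torn down first during slave cleanup so the QPs they pin can actually be released afterwards.
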
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 33bcb63d56a2..8fb481252e2c 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
528 for (; rxfc != 0; rxfc--) { 528 for (; rxfc != 0; rxfc--) {
529 rxh = ks8851_rdreg32(ks, KS_RXFHSR); 529 rxh = ks8851_rdreg32(ks, KS_RXFHSR);
530 rxstat = rxh & 0xffff; 530 rxstat = rxh & 0xffff;
531 rxlen = rxh >> 16; 531 rxlen = (rxh >> 16) & 0xfff;
532 532
533 netif_dbg(ks, rx_status, ks->netdev, 533 netif_dbg(ks, rx_status, ks->netdev,
534 "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); 534 "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index c4122c86f829..efa29b712d5f 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1472,7 +1472,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1472 } 1472 }
1473 platform_set_drvdata(pdev, ndev); 1473 platform_set_drvdata(pdev, ndev);
1474 1474
1475 if (lpc_mii_init(pldat) != 0) 1475 ret = lpc_mii_init(pldat);
1476 if (ret)
1476 goto err_out_unregister_netdev; 1477 goto err_out_unregister_netdev;
1477 1478
1478 netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", 1479 netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 39ab4d09faaa..73ce7dd6b954 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1726,9 +1726,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1726 1726
1727 skb->protocol = eth_type_trans(skb, netdev); 1727 skb->protocol = eth_type_trans(skb, netdev);
1728 if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) 1728 if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
1729 skb->ip_summed = CHECKSUM_NONE;
1730 else
1731 skb->ip_summed = CHECKSUM_UNNECESSARY; 1729 skb->ip_summed = CHECKSUM_UNNECESSARY;
1730 else
1731 skb->ip_summed = CHECKSUM_NONE;
1732 1732
1733 napi_gro_receive(&adapter->napi, skb); 1733 napi_gro_receive(&adapter->napi, skb);
1734 (*work_done)++; 1734 (*work_done)++;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 28fb50a1e9c3..4ecbe64a758d 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3818,6 +3818,30 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
3818 } 3818 }
3819} 3819}
3820 3820
3821static void rtl_speed_down(struct rtl8169_private *tp)
3822{
3823 u32 adv;
3824 int lpa;
3825
3826 rtl_writephy(tp, 0x1f, 0x0000);
3827 lpa = rtl_readphy(tp, MII_LPA);
3828
3829 if (lpa & (LPA_10HALF | LPA_10FULL))
3830 adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
3831 else if (lpa & (LPA_100HALF | LPA_100FULL))
3832 adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3833 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
3834 else
3835 adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3836 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
3837 (tp->mii.supports_gmii ?
3838 ADVERTISED_1000baseT_Half |
3839 ADVERTISED_1000baseT_Full : 0);
3840
3841 rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
3842 adv);
3843}
3844
3821static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) 3845static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3822{ 3846{
3823 void __iomem *ioaddr = tp->mmio_addr; 3847 void __iomem *ioaddr = tp->mmio_addr;
@@ -3848,9 +3872,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3848 if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) 3872 if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3849 return false; 3873 return false;
3850 3874
3851 rtl_writephy(tp, 0x1f, 0x0000); 3875 rtl_speed_down(tp);
3852 rtl_writephy(tp, MII_BMCR, 0x0000);
3853
3854 rtl_wol_suspend_quirk(tp); 3876 rtl_wol_suspend_quirk(tp);
3855 3877
3856 return true; 3878 return true;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 33e96176e4d8..6ed333fe5c04 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1216,10 +1216,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
1216 if (felic_stat & ECSR_LCHNG) { 1216 if (felic_stat & ECSR_LCHNG) {
1217 /* Link Changed */ 1217 /* Link Changed */
1218 if (mdp->cd->no_psr || mdp->no_ether_link) { 1218 if (mdp->cd->no_psr || mdp->no_ether_link) {
1219 if (mdp->link == PHY_DOWN) 1219 goto ignore_link;
1220 link_stat = 0;
1221 else
1222 link_stat = PHY_ST_LINK;
1223 } else { 1220 } else {
1224 link_stat = (sh_eth_read(ndev, PSR)); 1221 link_stat = (sh_eth_read(ndev, PSR));
1225 if (mdp->ether_link_active_low) 1222 if (mdp->ether_link_active_low)
@@ -1242,6 +1239,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
1242 } 1239 }
1243 } 1240 }
1244 1241
1242ignore_link:
1245 if (intr_status & EESR_TWB) { 1243 if (intr_status & EESR_TWB) {
1246 /* Write buck end. unused write back interrupt */ 1244 /* Write buck end. unused write back interrupt */
1247 if (intr_status & EESR_TABT) /* Transmit Abort int */ 1245 if (intr_status & EESR_TABT) /* Transmit Abort int */
@@ -1326,12 +1324,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1326 struct sh_eth_private *mdp = netdev_priv(ndev); 1324 struct sh_eth_private *mdp = netdev_priv(ndev);
1327 struct sh_eth_cpu_data *cd = mdp->cd; 1325 struct sh_eth_cpu_data *cd = mdp->cd;
1328 irqreturn_t ret = IRQ_NONE; 1326 irqreturn_t ret = IRQ_NONE;
1329 u32 intr_status = 0; 1327 unsigned long intr_status;
1330 1328
1331 spin_lock(&mdp->lock); 1329 spin_lock(&mdp->lock);
1332 1330
1333 /* Get interrpt stat */ 1331 /* Get interrupt status */
1334 intr_status = sh_eth_read(ndev, EESR); 1332 intr_status = sh_eth_read(ndev, EESR);
1333 /* Mask it with the interrupt mask, forcing ECI interrupt to be always
1334 * enabled since it's the one that comes thru regardless of the mask,
1335 * and we need to fully handle it in sh_eth_error() in order to quench
1336 * it as it doesn't get cleared by just writing 1 to the ECI bit...
1337 */
1338 intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI;
1335 /* Clear interrupt */ 1339 /* Clear interrupt */
1336 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | 1340 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
1337 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | 1341 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
@@ -1373,7 +1377,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1373 struct phy_device *phydev = mdp->phydev; 1377 struct phy_device *phydev = mdp->phydev;
1374 int new_state = 0; 1378 int new_state = 0;
1375 1379
1376 if (phydev->link != PHY_DOWN) { 1380 if (phydev->link) {
1377 if (phydev->duplex != mdp->duplex) { 1381 if (phydev->duplex != mdp->duplex) {
1378 new_state = 1; 1382 new_state = 1;
1379 mdp->duplex = phydev->duplex; 1383 mdp->duplex = phydev->duplex;
@@ -1387,17 +1391,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1387 if (mdp->cd->set_rate) 1391 if (mdp->cd->set_rate)
1388 mdp->cd->set_rate(ndev); 1392 mdp->cd->set_rate(ndev);
1389 } 1393 }
1390 if (mdp->link == PHY_DOWN) { 1394 if (!mdp->link) {
1391 sh_eth_write(ndev, 1395 sh_eth_write(ndev,
1392 (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); 1396 (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
1393 new_state = 1; 1397 new_state = 1;
1394 mdp->link = phydev->link; 1398 mdp->link = phydev->link;
1399 if (mdp->cd->no_psr || mdp->no_ether_link)
1400 sh_eth_rcv_snd_enable(ndev);
1395 } 1401 }
1396 } else if (mdp->link) { 1402 } else if (mdp->link) {
1397 new_state = 1; 1403 new_state = 1;
1398 mdp->link = PHY_DOWN; 1404 mdp->link = 0;
1399 mdp->speed = 0; 1405 mdp->speed = 0;
1400 mdp->duplex = -1; 1406 mdp->duplex = -1;
1407 if (mdp->cd->no_psr || mdp->no_ether_link)
1408 sh_eth_rcv_snd_disable(ndev);
1401 } 1409 }
1402 1410
1403 if (new_state && netif_msg_link(mdp)) 1411 if (new_state && netif_msg_link(mdp))
@@ -1414,7 +1422,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
1414 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 1422 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1415 mdp->mii_bus->id , mdp->phy_id); 1423 mdp->mii_bus->id , mdp->phy_id);
1416 1424
1417 mdp->link = PHY_DOWN; 1425 mdp->link = 0;
1418 mdp->speed = 0; 1426 mdp->speed = 0;
1419 mdp->duplex = -1; 1427 mdp->duplex = -1;
1420 1428
@@ -2220,6 +2228,7 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2220/* MDIO bus release function */ 2228/* MDIO bus release function */
2221static int sh_mdio_release(struct net_device *ndev) 2229static int sh_mdio_release(struct net_device *ndev)
2222{ 2230{
2231 struct sh_eth_private *mdp = netdev_priv(ndev);
2223 struct mii_bus *bus = dev_get_drvdata(&ndev->dev); 2232 struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
2224 2233
2225 /* unregister mdio bus */ 2234 /* unregister mdio bus */
@@ -2234,6 +2243,9 @@ static int sh_mdio_release(struct net_device *ndev)
2234 /* free bitbang info */ 2243 /* free bitbang info */
2235 free_mdio_bitbang(bus); 2244 free_mdio_bitbang(bus);
2236 2245
2246 /* free bitbang memory */
2247 kfree(mdp->bitbang);
2248
2237 return 0; 2249 return 0;
2238} 2250}
2239 2251
@@ -2262,6 +2274,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
2262 bitbang->ctrl.ops = &bb_ops; 2274 bitbang->ctrl.ops = &bb_ops;
2263 2275
2264 /* MII controller setting */ 2276 /* MII controller setting */
2277 mdp->bitbang = bitbang;
2265 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); 2278 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2266 if (!mdp->mii_bus) { 2279 if (!mdp->mii_bus) {
2267 ret = -ENOMEM; 2280 ret = -ENOMEM;
@@ -2441,6 +2454,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2441 } 2454 }
2442 mdp->tsu_addr = ioremap(rtsu->start, 2455 mdp->tsu_addr = ioremap(rtsu->start,
2443 resource_size(rtsu)); 2456 resource_size(rtsu));
2457 if (mdp->tsu_addr == NULL) {
2458 ret = -ENOMEM;
2459 dev_err(&pdev->dev, "TSU ioremap failed.\n");
2460 goto out_release;
2461 }
2444 mdp->port = devno % 2; 2462 mdp->port = devno % 2;
2445 ndev->features = NETIF_F_HW_VLAN_FILTER; 2463 ndev->features = NETIF_F_HW_VLAN_FILTER;
2446 } 2464 }
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index bae84fd2e73a..828be4515008 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -705,6 +705,7 @@ struct sh_eth_private {
705 const u16 *reg_offset; 705 const u16 *reg_offset;
706 void __iomem *addr; 706 void __iomem *addr;
707 void __iomem *tsu_addr; 707 void __iomem *tsu_addr;
708 struct bb_info *bitbang;
708 u32 num_rx_ring; 709 u32 num_rx_ring;
709 u32 num_tx_ring; 710 u32 num_tx_ring;
710 dma_addr_t rx_desc_dma; 711 dma_addr_t rx_desc_dma;
@@ -722,7 +723,7 @@ struct sh_eth_private {
722 u32 phy_id; /* PHY ID */ 723 u32 phy_id; /* PHY ID */
723 struct mii_bus *mii_bus; /* MDIO bus control */ 724 struct mii_bus *mii_bus; /* MDIO bus control */
724 struct phy_device *phydev; /* PHY device control */ 725 struct phy_device *phydev; /* PHY device control */
725 enum phy_state link; 726 int link;
726 phy_interface_t phy_interface; 727 phy_interface_t phy_interface;
727 int msg_enable; 728 int msg_enable;
728 int speed; 729 int speed;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 75c48558e6fd..80cad06e5eb2 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status)
436 * queue is stopped then start the queue as we have free desc for tx 436 * queue is stopped then start the queue as we have free desc for tx
437 */ 437 */
438 if (unlikely(netif_queue_stopped(ndev))) 438 if (unlikely(netif_queue_stopped(ndev)))
439 netif_start_queue(ndev); 439 netif_wake_queue(ndev);
440 cpts_tx_timestamp(priv->cpts, skb); 440 cpts_tx_timestamp(priv->cpts, skb);
441 priv->stats.tx_packets++; 441 priv->stats.tx_packets++;
442 priv->stats.tx_bytes += len; 442 priv->stats.tx_bytes += len;
@@ -1364,7 +1364,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1364 struct platform_device *mdio; 1364 struct platform_device *mdio;
1365 1365
1366 parp = of_get_property(slave_node, "phy_id", &lenp); 1366 parp = of_get_property(slave_node, "phy_id", &lenp);
1367 if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) { 1367 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
1368 pr_err("Missing slave[%d] phy_id property\n", i); 1368 pr_err("Missing slave[%d] phy_id property\n", i);
1369 ret = -EINVAL; 1369 ret = -EINVAL;
1370 goto error_ret; 1370 goto error_ret;
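
The cpsw change above is a one-character logic fix: with &&, a phy_id property that was present but had the wrong length slipped past the check; with || either a missing property or an unexpected length is rejected. A hedged sketch of the corrected validation, assuming a generic device-tree node (the expected length is taken from the hunk and kept as-is):

/* Sketch only: generic OF property validation, not the cpsw probe itself. */
#include <linux/of.h>
#include <linux/errno.h>

static int check_phy_id_property(struct device_node *slave_node)
{
	const void *parp;
	int lenp;

	parp = of_get_property(slave_node, "phy_id", &lenp);
	/* reject when the property is absent OR when its size is unexpected */
	if (!parp || lenp != (sizeof(void *) * 2))
		return -EINVAL;

	return 0;
}
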
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index ae1b77aa199f..72300bc9e378 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status)
1053 * queue is stopped then start the queue as we have free desc for tx 1053 * queue is stopped then start the queue as we have free desc for tx
1054 */ 1054 */
1055 if (unlikely(netif_queue_stopped(ndev))) 1055 if (unlikely(netif_queue_stopped(ndev)))
1056 netif_start_queue(ndev); 1056 netif_wake_queue(ndev);
1057 ndev->stats.tx_packets++; 1057 ndev->stats.tx_packets++;
1058 ndev->stats.tx_bytes += len; 1058 ndev->stats.tx_bytes += len;
1059 dev_kfree_skb_any(skb); 1059 dev_kfree_skb_any(skb);
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 1cd77483da50..f5f0f09e4cc5 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -470,8 +470,10 @@ static void netvsc_send_completion(struct hv_device *device,
470 packet->trans_id; 470 packet->trans_id;
471 471
472 /* Notify the layer above us */ 472 /* Notify the layer above us */
473 nvsc_packet->completion.send.send_completion( 473 if (nvsc_packet)
474 nvsc_packet->completion.send.send_completion_ctx); 474 nvsc_packet->completion.send.send_completion(
475 nvsc_packet->completion.send.
476 send_completion_ctx);
475 477
476 num_outstanding_sends = 478 num_outstanding_sends =
477 atomic_dec_return(&net_device->num_outstanding_sends); 479 atomic_dec_return(&net_device->num_outstanding_sends);
@@ -498,6 +500,7 @@ int netvsc_send(struct hv_device *device,
498 int ret = 0; 500 int ret = 0;
499 struct nvsp_message sendMessage; 501 struct nvsp_message sendMessage;
500 struct net_device *ndev; 502 struct net_device *ndev;
503 u64 req_id;
501 504
502 net_device = get_outbound_net_device(device); 505 net_device = get_outbound_net_device(device);
503 if (!net_device) 506 if (!net_device)
@@ -518,20 +521,24 @@ int netvsc_send(struct hv_device *device,
518 0xFFFFFFFF; 521 0xFFFFFFFF;
519 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; 522 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
520 523
524 if (packet->completion.send.send_completion)
525 req_id = (u64)packet;
526 else
527 req_id = 0;
528
521 if (packet->page_buf_cnt) { 529 if (packet->page_buf_cnt) {
522 ret = vmbus_sendpacket_pagebuffer(device->channel, 530 ret = vmbus_sendpacket_pagebuffer(device->channel,
523 packet->page_buf, 531 packet->page_buf,
524 packet->page_buf_cnt, 532 packet->page_buf_cnt,
525 &sendMessage, 533 &sendMessage,
526 sizeof(struct nvsp_message), 534 sizeof(struct nvsp_message),
527 (unsigned long)packet); 535 req_id);
528 } else { 536 } else {
529 ret = vmbus_sendpacket(device->channel, &sendMessage, 537 ret = vmbus_sendpacket(device->channel, &sendMessage,
530 sizeof(struct nvsp_message), 538 sizeof(struct nvsp_message),
531 (unsigned long)packet, 539 req_id,
532 VM_PKT_DATA_INBAND, 540 VM_PKT_DATA_INBAND,
533 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 541 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
534
535 } 542 }
536 543
537 if (ret == 0) { 544 if (ret == 0) {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5f85205cd12b..8341b62e5521 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -241,13 +241,11 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
241 241
242 if (status == 1) { 242 if (status == 1) {
243 netif_carrier_on(net); 243 netif_carrier_on(net);
244 netif_wake_queue(net);
245 ndev_ctx = netdev_priv(net); 244 ndev_ctx = netdev_priv(net);
246 schedule_delayed_work(&ndev_ctx->dwork, 0); 245 schedule_delayed_work(&ndev_ctx->dwork, 0);
247 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); 246 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
248 } else { 247 } else {
249 netif_carrier_off(net); 248 netif_carrier_off(net);
250 netif_tx_disable(net);
251 } 249 }
252} 250}
253 251
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2b657d4d63a8..0775f0aefd1e 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -61,9 +61,6 @@ struct rndis_request {
61 61
62static void rndis_filter_send_completion(void *ctx); 62static void rndis_filter_send_completion(void *ctx);
63 63
64static void rndis_filter_send_request_completion(void *ctx);
65
66
67 64
68static struct rndis_device *get_rndis_device(void) 65static struct rndis_device *get_rndis_device(void)
69{ 66{
@@ -241,10 +238,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
241 packet->page_buf[0].len; 238 packet->page_buf[0].len;
242 } 239 }
243 240
244 packet->completion.send.send_completion_ctx = req;/* packet; */ 241 packet->completion.send.send_completion = NULL;
245 packet->completion.send.send_completion =
246 rndis_filter_send_request_completion;
247 packet->completion.send.send_completion_tid = (unsigned long)dev;
248 242
249 ret = netvsc_send(dev->net_dev->dev, packet); 243 ret = netvsc_send(dev->net_dev->dev, packet);
250 return ret; 244 return ret;
@@ -999,9 +993,3 @@ static void rndis_filter_send_completion(void *ctx)
999 /* Pass it back to the original handler */ 993 /* Pass it back to the original handler */
1000 filter_pkt->completion(filter_pkt->completion_ctx); 994 filter_pkt->completion(filter_pkt->completion_ctx);
1001} 995}
1002
1003
1004static void rndis_filter_send_request_completion(void *ctx)
1005{
1006 /* Noop */
1007}
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 9abe51710f22..1a15ec14c386 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -914,8 +914,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
914static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) 914static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
915{ 915{
916 struct usbnet *dev = netdev_priv(netdev); 916 struct usbnet *dev = netdev_priv(netdev);
917 int ret;
918
919 if (new_mtu > MAX_SINGLE_PACKET_SIZE)
920 return -EINVAL;
917 921
918 int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); 922 ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
919 if (ret < 0) { 923 if (ret < 0) {
920 netdev_warn(dev->net, "Failed to set mac rx frame length\n"); 924 netdev_warn(dev->net, "Failed to set mac rx frame length\n");
921 return ret; 925 return ret;
@@ -1324,7 +1328,7 @@ static int smsc75xx_reset(struct usbnet *dev)
1324 1328
1325 netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); 1329 netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf);
1326 1330
1327 ret = smsc75xx_set_rx_max_frame_length(dev, 1514); 1331 ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
1328 if (ret < 0) { 1332 if (ret < 0) {
1329 netdev_warn(dev->net, "Failed to set max rx frame length\n"); 1333 netdev_warn(dev->net, "Failed to set max rx frame length\n");
1330 return ret; 1334 return ret;
@@ -2134,8 +2138,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
2134 else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) 2138 else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
2135 dev->net->stats.rx_frame_errors++; 2139 dev->net->stats.rx_frame_errors++;
2136 } else { 2140 } else {
2137 /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ 2141 /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
2138 if (unlikely(size > (ETH_FRAME_LEN + 12))) { 2142 if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
2139 netif_dbg(dev, rx_err, dev->net, 2143 netif_dbg(dev, rx_err, dev->net,
2140 "size err rx_cmd_a=0x%08x\n", 2144 "size err rx_cmd_a=0x%08x\n",
2141 rx_cmd_a); 2145 rx_cmd_a);
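
The smsc75xx hunks above replace the hard-coded 1514-byte RX limit with mtu + ETH_HLEN and bound new_mtu by MAX_SINGLE_PACKET_SIZE before programming it. For the default MTU the arithmetic lands on the old constant (1500 + 14 = 1514); a one-line helper expressing that relationship, purely as a sketch:

/* Sketch only: MTU-to-MAC-frame-length arithmetic, mirroring the hunk above. */
#include <linux/if_ether.h>

static inline int mtu_to_rx_frame_len(int mtu)
{
	return mtu + ETH_HLEN;		/* payload + 14-byte Ethernet header */
}
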
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 4cc13940c895..f76c3ca07a45 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1023,6 +1023,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
1023 AR_PHY_AGC_CONTROL_FLTR_CAL | 1023 AR_PHY_AGC_CONTROL_FLTR_CAL |
1024 AR_PHY_AGC_CONTROL_PKDET_CAL; 1024 AR_PHY_AGC_CONTROL_PKDET_CAL;
1025 1025
1026 /* Use chip chainmask only for calibration */
1026 ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask); 1027 ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
1027 1028
1028 if (rtt) { 1029 if (rtt) {
@@ -1150,6 +1151,9 @@ skip_tx_iqcal:
1150 ar9003_hw_rtt_disable(ah); 1151 ar9003_hw_rtt_disable(ah);
1151 } 1152 }
1152 1153
1154 /* Revert chainmask to runtime parameters */
1155 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
1156
1153 /* Initialize list pointers */ 1157 /* Initialize list pointers */
1154 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; 1158 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
1155 1159
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index ade3afb21f91..7fdac6c7b3ea 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -28,21 +28,21 @@ void ath_tx_complete_poll_work(struct work_struct *work)
28 int i; 28 int i;
29 bool needreset = false; 29 bool needreset = false;
30 30
31 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 31 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
32 if (ATH_TXQ_SETUP(sc, i)) { 32 txq = sc->tx.txq_map[i];
33 txq = &sc->tx.txq[i]; 33
34 ath_txq_lock(sc, txq); 34 ath_txq_lock(sc, txq);
35 if (txq->axq_depth) { 35 if (txq->axq_depth) {
36 if (txq->axq_tx_inprogress) { 36 if (txq->axq_tx_inprogress) {
37 needreset = true; 37 needreset = true;
38 ath_txq_unlock(sc, txq); 38 ath_txq_unlock(sc, txq);
39 break; 39 break;
40 } else { 40 } else {
41 txq->axq_tx_inprogress = true; 41 txq->axq_tx_inprogress = true;
42 }
43 } 42 }
44 ath_txq_unlock_complete(sc, txq);
45 } 43 }
44 ath_txq_unlock_complete(sc, txq);
45 }
46 46
47 if (needreset) { 47 if (needreset) {
48 ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, 48 ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
@@ -170,7 +170,8 @@ void ath_rx_poll(unsigned long data)
170{ 170{
171 struct ath_softc *sc = (struct ath_softc *)data; 171 struct ath_softc *sc = (struct ath_softc *)data;
172 172
173 ieee80211_queue_work(sc->hw, &sc->hw_check_work); 173 if (!test_bit(SC_OP_INVALID, &sc->sc_flags))
174 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
174} 175}
175 176
176/* 177/*
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6e66f9c6782b..988372d218a4 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -280,6 +280,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
280 if (r) { 280 if (r) {
281 ath_err(common, 281 ath_err(common,
282 "Unable to reset channel, reset status %d\n", r); 282 "Unable to reset channel, reset status %d\n", r);
283
284 ath9k_hw_enable_interrupts(ah);
285 ath9k_queue_reset(sc, RESET_TYPE_BB_HANG);
286
283 goto out; 287 goto out;
284 } 288 }
285 289
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 38bc5a7997ff..122146943bf2 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1487,8 +1487,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1487 const struct b43_dma_ops *ops; 1487 const struct b43_dma_ops *ops;
1488 struct b43_dmaring *ring; 1488 struct b43_dmaring *ring;
1489 struct b43_dmadesc_meta *meta; 1489 struct b43_dmadesc_meta *meta;
1490 static const struct b43_txstatus fake; /* filled with 0 */
1491 const struct b43_txstatus *txstat;
1490 int slot, firstused; 1492 int slot, firstused;
1491 bool frame_succeed; 1493 bool frame_succeed;
1494 int skip;
1495 static u8 err_out1, err_out2;
1492 1496
1493 ring = parse_cookie(dev, status->cookie, &slot); 1497 ring = parse_cookie(dev, status->cookie, &slot);
1494 if (unlikely(!ring)) 1498 if (unlikely(!ring))
@@ -1501,13 +1505,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1501 firstused = ring->current_slot - ring->used_slots + 1; 1505 firstused = ring->current_slot - ring->used_slots + 1;
1502 if (firstused < 0) 1506 if (firstused < 0)
1503 firstused = ring->nr_slots + firstused; 1507 firstused = ring->nr_slots + firstused;
1508
1509 skip = 0;
1504 if (unlikely(slot != firstused)) { 1510 if (unlikely(slot != firstused)) {
1505 /* This possibly is a firmware bug and will result in 1511 /* This possibly is a firmware bug and will result in
1506 * malfunction, memory leaks and/or stall of DMA functionality. */ 1512 * malfunction, memory leaks and/or stall of DMA functionality.
1507 b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. " 1513 */
1508 "Expected %d, but got %d\n", 1514 if (slot == next_slot(ring, next_slot(ring, firstused))) {
1509 ring->index, firstused, slot); 1515 /* If a single header/data pair was missed, skip over
1510 return; 1516 * the first two slots in an attempt to recover.
1517 */
1518 slot = firstused;
1519 skip = 2;
1520 if (!err_out1) {
1521 /* Report the error once. */
1522 b43dbg(dev->wl,
1523 "Skip on DMA ring %d slot %d.\n",
1524 ring->index, slot);
1525 err_out1 = 1;
1526 }
1527 } else {
1528 /* More than a single header/data pair were missed.
1529 * Report this error once.
1530 */
1531 if (!err_out2)
1532 b43dbg(dev->wl,
1533 "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
1534 ring->index, firstused, slot);
1535 err_out2 = 1;
1536 return;
1537 }
1511 } 1538 }
1512 1539
1513 ops = ring->ops; 1540 ops = ring->ops;
@@ -1522,11 +1549,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1522 slot, firstused, ring->index); 1549 slot, firstused, ring->index);
1523 break; 1550 break;
1524 } 1551 }
1552
1525 if (meta->skb) { 1553 if (meta->skb) {
1526 struct b43_private_tx_info *priv_info = 1554 struct b43_private_tx_info *priv_info =
1527 b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); 1555 b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
1528 1556
1529 unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); 1557 unmap_descbuffer(ring, meta->dmaaddr,
1558 meta->skb->len, 1);
1530 kfree(priv_info->bouncebuffer); 1559 kfree(priv_info->bouncebuffer);
1531 priv_info->bouncebuffer = NULL; 1560 priv_info->bouncebuffer = NULL;
1532 } else { 1561 } else {
@@ -1538,8 +1567,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1538 struct ieee80211_tx_info *info; 1567 struct ieee80211_tx_info *info;
1539 1568
1540 if (unlikely(!meta->skb)) { 1569 if (unlikely(!meta->skb)) {
1541 /* This is a scatter-gather fragment of a frame, so 1570 /* This is a scatter-gather fragment of a frame,
1542 * the skb pointer must not be NULL. */ 1571 * so the skb pointer must not be NULL.
1572 */
1543 b43dbg(dev->wl, "TX status unexpected NULL skb " 1573 b43dbg(dev->wl, "TX status unexpected NULL skb "
1544 "at slot %d (first=%d) on ring %d\n", 1574 "at slot %d (first=%d) on ring %d\n",
1545 slot, firstused, ring->index); 1575 slot, firstused, ring->index);
@@ -1550,9 +1580,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1550 1580
1551 /* 1581 /*
1552 * Call back to inform the ieee80211 subsystem about 1582 * Call back to inform the ieee80211 subsystem about
1553 * the status of the transmission. 1583 * the status of the transmission. When skipping over
1584 * a missed TX status report, use a status structure
1585 * filled with zeros to indicate that the frame was not
1586 * sent (frame_count 0) and not acknowledged
1554 */ 1587 */
1555 frame_succeed = b43_fill_txstatus_report(dev, info, status); 1588 if (unlikely(skip))
1589 txstat = &fake;
1590 else
1591 txstat = status;
1592
1593 frame_succeed = b43_fill_txstatus_report(dev, info,
1594 txstat);
1556#ifdef CONFIG_B43_DEBUG 1595#ifdef CONFIG_B43_DEBUG
1557 if (frame_succeed) 1596 if (frame_succeed)
1558 ring->nr_succeed_tx_packets++; 1597 ring->nr_succeed_tx_packets++;
@@ -1580,12 +1619,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1580 /* Everything unmapped and free'd. So it's not used anymore. */ 1619 /* Everything unmapped and free'd. So it's not used anymore. */
1581 ring->used_slots--; 1620 ring->used_slots--;
1582 1621
1583 if (meta->is_last_fragment) { 1622 if (meta->is_last_fragment && !skip) {
1584 /* This is the last scatter-gather 1623 /* This is the last scatter-gather
1585 * fragment of the frame. We are done. */ 1624 * fragment of the frame. We are done. */
1586 break; 1625 break;
1587 } 1626 }
1588 slot = next_slot(ring, slot); 1627 slot = next_slot(ring, slot);
1628 if (skip > 0)
1629 --skip;
1589 } 1630 }
1590 if (ring->stopped) { 1631 if (ring->stopped) {
1591 B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); 1632 B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 3c35382ee6c2..e8486c1e091a 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1564 u16 clip_off[2] = { 0xFFFF, 0xFFFF }; 1564 u16 clip_off[2] = { 0xFFFF, 0xFFFF };
1565 1565
1566 u8 vcm_final = 0; 1566 u8 vcm_final = 0;
1567 s8 offset[4]; 1567 s32 offset[4];
1568 s32 results[8][4] = { }; 1568 s32 results[8][4] = { };
1569 s32 results_min[4] = { }; 1569 s32 results_min[4] = { };
1570 s32 poll_results[4] = { }; 1570 s32 poll_results[4] = { };
@@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1615 } 1615 }
1616 for (i = 0; i < 4; i += 2) { 1616 for (i = 0; i < 4; i += 2) {
1617 s32 curr; 1617 s32 curr;
1618 s32 mind = 40; 1618 s32 mind = 0x100000;
1619 s32 minpoll = 249; 1619 s32 minpoll = 249;
1620 u8 minvcm = 0; 1620 u8 minvcm = 0;
1621 if (2 * core != i) 1621 if (2 * core != i)
@@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
1732 u8 regs_save_radio[2]; 1732 u8 regs_save_radio[2];
1733 u16 regs_save_phy[2]; 1733 u16 regs_save_phy[2];
1734 1734
1735 s8 offset[4]; 1735 s32 offset[4];
1736 u8 core; 1736 u8 core;
1737 u8 rail; 1737 u8 rail;
1738 1738
@@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
1799 } 1799 }
1800 1800
1801 for (i = 0; i < 4; i++) { 1801 for (i = 0; i < 4; i++) {
1802 s32 mind = 40; 1802 s32 mind = 0x100000;
1803 u8 minvcm = 0; 1803 u8 minvcm = 0;
1804 s32 minpoll = 249; 1804 s32 minpoll = 249;
1805 s32 curr; 1805 s32 curr;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 4469321c0eb3..35fc68be158d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -3317,15 +3317,15 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
 		goto err;
 	}
 
-	/* External image takes precedence if specified */
 	if (brcmf_sdbrcm_download_code_file(bus)) {
 		brcmf_err("dongle image file download failed\n");
 		goto err;
 	}
 
-	/* External nvram takes precedence if specified */
-	if (brcmf_sdbrcm_download_nvram(bus))
+	if (brcmf_sdbrcm_download_nvram(bus)) {
 		brcmf_err("dongle nvram file download failed\n");
+		goto err;
+	}
 
 	/* Take arm out of reset */
 	if (brcmf_sdbrcm_download_state(bus, false)) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 2af9c0f0798d..ec46ffff5409 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1891,8 +1891,10 @@ static s32
 brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
 	      u8 key_idx, const u8 *mac_addr, struct key_params *params)
 {
+	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_wsec_key key;
 	s32 err = 0;
+	u8 keybuf[8];
 
 	memset(&key, 0, sizeof(key));
 	key.index = (u32) key_idx;
@@ -1916,8 +1918,9 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
 	brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
 	memcpy(key.data, params->key, key.len);
 
-	if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
-		u8 keybuf[8];
+	if ((ifp->vif->mode != WL_MODE_AP) &&
+	    (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
+		brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
 		memcpy(keybuf, &key.data[24], sizeof(keybuf));
 		memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
 		memcpy(&key.data[16], keybuf, sizeof(keybuf));
@@ -2013,7 +2016,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
 		break;
 	case WLAN_CIPHER_SUITE_TKIP:
 		if (ifp->vif->mode != WL_MODE_AP) {
-			brcmf_dbg(CONN, "Swapping key\n");
+			brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
 			memcpy(keybuf, &key.data[24], sizeof(keybuf));
 			memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
 			memcpy(&key.data[16], keybuf, sizeof(keybuf));
@@ -2118,8 +2121,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
 		err = -EAGAIN;
 		goto done;
 	}
-	switch (wsec & ~SES_OW_ENABLED) {
-	case WEP_ENABLED:
+	if (wsec & WEP_ENABLED) {
 		sec = &profile->sec;
 		if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
 			params.cipher = WLAN_CIPHER_SUITE_WEP40;
@@ -2128,16 +2130,13 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
 			params.cipher = WLAN_CIPHER_SUITE_WEP104;
 			brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
 		}
-		break;
-	case TKIP_ENABLED:
+	} else if (wsec & TKIP_ENABLED) {
 		params.cipher = WLAN_CIPHER_SUITE_TKIP;
 		brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
-		break;
-	case AES_ENABLED:
+	} else if (wsec & AES_ENABLED) {
 		params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
 		brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
-		break;
-	default:
+	} else {
 		brcmf_err("Invalid algo (0x%x)\n", wsec);
 		err = -EINVAL;
 		goto done;
@@ -3824,8 +3823,9 @@ exit:
 static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
 {
 	struct brcmf_if *ifp = netdev_priv(ndev);
-	s32 err = -EPERM;
+	s32 err;
 	struct brcmf_fil_bss_enable_le bss_enable;
+	struct brcmf_join_params join_params;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -3833,16 +3833,21 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
 		/* Due to most likely deauths outstanding we sleep */
 		/* first to make sure they get processed by fw. */
 		msleep(400);
-		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
-		if (err < 0) {
-			brcmf_err("setting AP mode failed %d\n", err);
-			goto exit;
-		}
+
+		memset(&join_params, 0, sizeof(join_params));
+		err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+					     &join_params, sizeof(join_params));
+		if (err < 0)
+			brcmf_err("SET SSID error (%d)\n", err);
 		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
-		if (err < 0) {
+		if (err < 0)
 			brcmf_err("BRCMF_C_UP error %d\n", err);
-			goto exit;
-		}
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
+		if (err < 0)
+			brcmf_err("setting AP mode failed %d\n", err);
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
+		if (err < 0)
+			brcmf_err("setting INFRA mode failed %d\n", err);
 	} else {
 		bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
 		bss_enable.enable = cpu_to_le32(0);
@@ -3855,7 +3860,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
 	set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
 	clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
 
-exit:
 	return err;
 }
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index 21a824232478..18d37645e2cd 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1137,9 +1137,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
 	gain0_15 = ((biq1 & 0xf) << 12) |
 		   ((tia & 0xf) << 8) |
 		   ((lna2 & 0x3) << 6) |
-		   ((lna2 & 0x3) << 4) |
-		   ((lna1 & 0x3) << 2) |
-		   ((lna1 & 0x3) << 0);
+		   ((lna2 &
+		     0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
 
 	mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
 	mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
@@ -1157,8 +1156,6 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
 	}
 
 	mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0);
-	mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);
-	mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3);
 
 }
 
@@ -1331,43 +1328,6 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
1331 return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; 1328 return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
1332} 1329}
1333 1330
1334static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
1335 u16 tia_gain, u16 lna2_gain)
1336{
1337 u32 i_thresh_l, q_thresh_l;
1338 u32 i_thresh_h, q_thresh_h;
1339 struct lcnphy_iq_est iq_est_h, iq_est_l;
1340
1341 wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain,
1342 lna2_gain, 0);
1343
1344 wlc_lcnphy_rx_gain_override_enable(pi, true);
1345 wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
1346 udelay(500);
1347 write_radio_reg(pi, RADIO_2064_REG112, 0);
1348 if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
1349 return false;
1350
1351 wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
1352 udelay(500);
1353 write_radio_reg(pi, RADIO_2064_REG112, 0);
1354 if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
1355 return false;
1356
1357 i_thresh_l = (iq_est_l.i_pwr << 1);
1358 i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr;
1359
1360 q_thresh_l = (iq_est_l.q_pwr << 1);
1361 q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr;
1362 if ((iq_est_h.i_pwr > i_thresh_l) &&
1363 (iq_est_h.i_pwr < i_thresh_h) &&
1364 (iq_est_h.q_pwr > q_thresh_l) &&
1365 (iq_est_h.q_pwr < q_thresh_h))
1366 return true;
1367
1368 return false;
1369}
1370
 static bool
 wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
 		     const struct lcnphy_rx_iqcomp *iqcomp,
@@ -1382,8 +1342,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
 	    RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
 	    rfoverride3_old, rfoverride3val_old, rfoverride4_old,
 	    rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
-	int tia_gain, lna2_gain, biq1_gain;
-	bool set_gain;
+	int tia_gain;
+	u32 received_power, rx_pwr_threshold;
 	u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
 	u16 values_to_save[11];
 	s16 *ptr;
@@ -1408,134 +1368,126 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
1408 goto cal_done; 1368 goto cal_done;
1409 } 1369 }
1410 1370
1411 WARN_ON(module != 1); 1371 if (module == 1) {
1412 tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
1413 wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
1414
1415 for (i = 0; i < 11; i++)
1416 values_to_save[i] =
1417 read_radio_reg(pi, rxiq_cal_rf_reg[i]);
1418 Core1TxControl_old = read_phy_reg(pi, 0x631);
1419
1420 or_phy_reg(pi, 0x631, 0x0015);
1421
1422 RFOverride0_old = read_phy_reg(pi, 0x44c);
1423 RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
1424 rfoverride2_old = read_phy_reg(pi, 0x4b0);
1425 rfoverride2val_old = read_phy_reg(pi, 0x4b1);
1426 rfoverride3_old = read_phy_reg(pi, 0x4f9);
1427 rfoverride3val_old = read_phy_reg(pi, 0x4fa);
1428 rfoverride4_old = read_phy_reg(pi, 0x938);
1429 rfoverride4val_old = read_phy_reg(pi, 0x939);
1430 afectrlovr_old = read_phy_reg(pi, 0x43b);
1431 afectrlovrval_old = read_phy_reg(pi, 0x43c);
1432 old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
1433 old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
1434
1435 tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
1436 if (tx_gain_override_old) {
1437 wlc_lcnphy_get_tx_gain(pi, &old_gains);
1438 tx_gain_index_old = pi_lcn->lcnphy_current_index;
1439 }
1440
1441 wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
1442 1372
1443 mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); 1373 tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
1444 mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); 1374 wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
1445 1375
1446 mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); 1376 for (i = 0; i < 11; i++)
1447 mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); 1377 values_to_save[i] =
1378 read_radio_reg(pi, rxiq_cal_rf_reg[i]);
1379 Core1TxControl_old = read_phy_reg(pi, 0x631);
1380
1381 or_phy_reg(pi, 0x631, 0x0015);
1382
1383 RFOverride0_old = read_phy_reg(pi, 0x44c);
1384 RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
1385 rfoverride2_old = read_phy_reg(pi, 0x4b0);
1386 rfoverride2val_old = read_phy_reg(pi, 0x4b1);
1387 rfoverride3_old = read_phy_reg(pi, 0x4f9);
1388 rfoverride3val_old = read_phy_reg(pi, 0x4fa);
1389 rfoverride4_old = read_phy_reg(pi, 0x938);
1390 rfoverride4val_old = read_phy_reg(pi, 0x939);
1391 afectrlovr_old = read_phy_reg(pi, 0x43b);
1392 afectrlovrval_old = read_phy_reg(pi, 0x43c);
1393 old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
1394 old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
1395
1396 tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
1397 if (tx_gain_override_old) {
1398 wlc_lcnphy_get_tx_gain(pi, &old_gains);
1399 tx_gain_index_old = pi_lcn->lcnphy_current_index;
1400 }
1448 1401
1449 write_radio_reg(pi, RADIO_2064_REG116, 0x06); 1402 wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
1450 write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
1451 write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
1452 write_radio_reg(pi, RADIO_2064_REG098, 0x03);
1453 write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
1454 mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
1455 write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
1456 write_radio_reg(pi, RADIO_2064_REG114, 0x01);
1457 write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
1458 write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
1459
1460 mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
1461 mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
1462 mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
1463 mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
1464 mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
1465 mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
1466 mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
1467 mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
1468 mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
1469 mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
1470 1403
1471 mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); 1404 mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
1472 mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); 1405 mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
1473 1406
1474 write_phy_reg(pi, 0x6da, 0xffff); 1407 mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
1475 or_phy_reg(pi, 0x6db, 0x3); 1408 mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
1476 1409
1477 wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); 1410 write_radio_reg(pi, RADIO_2064_REG116, 0x06);
1478 set_gain = false; 1411 write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
1479 1412 write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
1480 lna2_gain = 3; 1413 write_radio_reg(pi, RADIO_2064_REG098, 0x03);
1481 while ((lna2_gain >= 0) && !set_gain) { 1414 write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
1482 tia_gain = 4; 1415 mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
1483 1416 write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
1484 while ((tia_gain >= 0) && !set_gain) { 1417 write_radio_reg(pi, RADIO_2064_REG114, 0x01);
1485 biq1_gain = 6; 1418 write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
1486 1419 write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
1487 while ((biq1_gain >= 0) && !set_gain) { 1420
1488 set_gain = wlc_lcnphy_rx_iq_cal_gain(pi, 1421 mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
1489 (u16) 1422 mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
1490 biq1_gain, 1423 mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
1491 (u16) 1424 mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
1492 tia_gain, 1425 mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
1493 (u16) 1426 mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
1494 lna2_gain); 1427 mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
1495 biq1_gain -= 1; 1428 mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
1496 } 1429 mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
1430 mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
1431
1432 mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
1433 mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
1434
1435 wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
1436 write_phy_reg(pi, 0x6da, 0xffff);
1437 or_phy_reg(pi, 0x6db, 0x3);
1438 wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
1439 wlc_lcnphy_rx_gain_override_enable(pi, true);
1440
1441 tia_gain = 8;
1442 rx_pwr_threshold = 950;
1443 while (tia_gain > 0) {
1497 tia_gain -= 1; 1444 tia_gain -= 1;
1445 wlc_lcnphy_set_rx_gain_by_distribution(pi,
1446 0, 0, 2, 2,
1447 (u16)
1448 tia_gain, 1, 0);
1449 udelay(500);
1450
1451 received_power =
1452 wlc_lcnphy_measure_digital_power(pi, 2000);
1453 if (received_power < rx_pwr_threshold)
1454 break;
1498 } 1455 }
1499 lna2_gain -= 1; 1456 result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
1500 }
1501 1457
1502 if (set_gain) 1458 wlc_lcnphy_stop_tx_tone(pi);
1503 result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024);
1504 else
1505 result = false;
1506 1459
1507 wlc_lcnphy_stop_tx_tone(pi); 1460 write_phy_reg(pi, 0x631, Core1TxControl_old);
1508 1461
1509 write_phy_reg(pi, 0x631, Core1TxControl_old); 1462 write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
1510 1463 write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
1511 write_phy_reg(pi, 0x44c, RFOverrideVal0_old); 1464 write_phy_reg(pi, 0x4b0, rfoverride2_old);
1512 write_phy_reg(pi, 0x44d, RFOverrideVal0_old); 1465 write_phy_reg(pi, 0x4b1, rfoverride2val_old);
1513 write_phy_reg(pi, 0x4b0, rfoverride2_old); 1466 write_phy_reg(pi, 0x4f9, rfoverride3_old);
1514 write_phy_reg(pi, 0x4b1, rfoverride2val_old); 1467 write_phy_reg(pi, 0x4fa, rfoverride3val_old);
1515 write_phy_reg(pi, 0x4f9, rfoverride3_old); 1468 write_phy_reg(pi, 0x938, rfoverride4_old);
1516 write_phy_reg(pi, 0x4fa, rfoverride3val_old); 1469 write_phy_reg(pi, 0x939, rfoverride4val_old);
1517 write_phy_reg(pi, 0x938, rfoverride4_old); 1470 write_phy_reg(pi, 0x43b, afectrlovr_old);
1518 write_phy_reg(pi, 0x939, rfoverride4val_old); 1471 write_phy_reg(pi, 0x43c, afectrlovrval_old);
1519 write_phy_reg(pi, 0x43b, afectrlovr_old); 1472 write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
1520 write_phy_reg(pi, 0x43c, afectrlovrval_old); 1473 write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
1521 write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
1522 write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
1523 1474
1524 wlc_lcnphy_clear_trsw_override(pi); 1475 wlc_lcnphy_clear_trsw_override(pi);
1525 1476
1526 mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); 1477 mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
1527 1478
1528 for (i = 0; i < 11; i++) 1479 for (i = 0; i < 11; i++)
1529 write_radio_reg(pi, rxiq_cal_rf_reg[i], 1480 write_radio_reg(pi, rxiq_cal_rf_reg[i],
1530 values_to_save[i]); 1481 values_to_save[i]);
1531 1482
1532 if (tx_gain_override_old) 1483 if (tx_gain_override_old)
1533 wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); 1484 wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
1534 else 1485 else
1535 wlc_lcnphy_disable_tx_gain_override(pi); 1486 wlc_lcnphy_disable_tx_gain_override(pi);
1536 1487
1537 wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); 1488 wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
1538 wlc_lcnphy_rx_gain_override_enable(pi, false); 1489 wlc_lcnphy_rx_gain_override_enable(pi, false);
1490 }
1539 1491
1540cal_done: 1492cal_done:
1541 kfree(ptr); 1493 kfree(ptr);
@@ -1829,17 +1781,6 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
1829 write_radio_reg(pi, RADIO_2064_REG038, 3); 1781 write_radio_reg(pi, RADIO_2064_REG038, 3);
1830 write_radio_reg(pi, RADIO_2064_REG091, 7); 1782 write_radio_reg(pi, RADIO_2064_REG091, 7);
1831 } 1783 }
1832
1833 if (!(pi->sh->boardflags & BFL_FEM)) {
1834 u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc,
1835 0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0};
1836
1837 write_radio_reg(pi, RADIO_2064_REG02A, 0xf);
1838 write_radio_reg(pi, RADIO_2064_REG091, 0x3);
1839 write_radio_reg(pi, RADIO_2064_REG038, 0x3);
1840
1841 write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]);
1842 }
1843} 1784}
1844 1785
1845static int 1786static int
@@ -2034,16 +1975,6 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
2034 } else { 1975 } else {
2035 mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); 1976 mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
2036 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); 1977 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
2037 mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0);
2038 mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2);
2039 mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0);
2040 mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4);
2041 mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
2042 mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77);
2043 mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1);
2044 mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7);
2045 mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1);
2046 mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);
2047 } 1978 }
2048 } else { 1979 } else {
2049 mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); 1980 mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
@@ -2130,14 +2061,12 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
2130 (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12)); 2061 (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
2131 2062
2132 mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); 2063 mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
2133 mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));
2134} 2064}
2135 2065
2136static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) 2066static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
2137{ 2067{
2138 struct phytbl_info tab; 2068 struct phytbl_info tab;
2139 u32 rfseq, ind; 2069 u32 rfseq, ind;
2140 u8 tssi_sel;
2141 2070
2142 tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; 2071 tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
2143 tab.tbl_width = 32; 2072 tab.tbl_width = 32;
@@ -2159,13 +2088,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
 
 	mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
 
-	if (pi->sh->boardflags & BFL_FEM) {
-		tssi_sel = 0x1;
-		wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
-	} else {
-		tssi_sel = 0xe;
-		wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA);
-	}
+	wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
 	mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
 
 	mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
@@ -2201,10 +2124,9 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
 	mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
 
 	if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
-		mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel);
+		mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);
 		mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
 	} else {
-		mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);
 		mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
 		mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
 	}
@@ -2251,10 +2173,6 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
2251 2173
2252 mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); 2174 mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
2253 2175
2254 mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0);
2255 mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
2256 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
2257
2258 wlc_lcnphy_pwrctrl_rssiparams(pi); 2176 wlc_lcnphy_pwrctrl_rssiparams(pi);
2259} 2177}
2260 2178
@@ -2873,8 +2791,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
2873 read_radio_reg(pi, RADIO_2064_REG007) & 1; 2791 read_radio_reg(pi, RADIO_2064_REG007) & 1;
2874 u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; 2792 u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
2875 u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; 2793 u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
2876 u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
2877
2878 idleTssi = read_phy_reg(pi, 0x4ab); 2794 idleTssi = read_phy_reg(pi, 0x4ab);
2879 suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & 2795 suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
2880 MCTL_EN_MAC)); 2796 MCTL_EN_MAC));
@@ -2892,12 +2808,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
2892 mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4); 2808 mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
2893 mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2); 2809 mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
2894 wlc_lcnphy_tssi_setup(pi); 2810 wlc_lcnphy_tssi_setup(pi);
2895
2896 mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0));
2897 mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6));
2898
2899 wlc_lcnphy_set_bbmult(pi, 0x0);
2900
2901 wlc_phy_do_dummy_tx(pi, true, OFF); 2811 wlc_phy_do_dummy_tx(pi, true, OFF);
2902 idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) 2812 idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
2903 >> 0); 2813 >> 0);
@@ -2919,7 +2829,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
2919 2829
2920 mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); 2830 mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
2921 2831
2922 wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);
2923 wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old); 2832 wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
2924 wlc_lcnphy_set_tx_gain(pi, &old_gains); 2833 wlc_lcnphy_set_tx_gain(pi, &old_gains);
2925 wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); 2834 wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
@@ -3133,11 +3042,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
3133 wlc_lcnphy_write_table(pi, &tab); 3042 wlc_lcnphy_write_table(pi, &tab);
3134 tab.tbl_offset++; 3043 tab.tbl_offset++;
3135 } 3044 }
3136 mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0);
3137 mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0);
3138 mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8);
3139 mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4);
3140 mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);
3141 3045
3142 mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); 3046 mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
3143 3047
@@ -3939,6 +3843,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
 	target_gains.pad_gain = 21;
 	target_gains.dac_gain = 0;
 	wlc_lcnphy_set_tx_gain(pi, &target_gains);
+	wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
 
 	if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {
 
@@ -3949,7 +3854,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
 						lcnphy_recal ? LCNPHY_CAL_RECAL :
 						LCNPHY_CAL_FULL), false);
 	} else {
-		wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
 		wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
 	}
 
@@ -4374,22 +4278,17 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
 	if (CHSPEC_IS5G(pi->radio_chanspec))
 		pa_gain = 0x70;
 	else
-		pa_gain = 0x60;
+		pa_gain = 0x70;
 
 	if (pi->sh->boardflags & BFL_FEM)
 		pa_gain = 0x10;
-
 	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
 	tab.tbl_width = 32;
 	tab.tbl_len = 1;
 	tab.tbl_ptr = &val;
 
 	for (j = 0; j < 128; j++) {
-		if (pi->sh->boardflags & BFL_FEM)
-			gm_gain = gain_table[j].gm;
-		else
-			gm_gain = 15;
-
+		gm_gain = gain_table[j].gm;
 		val = (((u32) pa_gain << 24) |
 		       (gain_table[j].pad << 16) |
 		       (gain_table[j].pga << 8) | gm_gain);
@@ -4600,10 +4499,7 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
 
 	write_phy_reg(pi, 0x4ea, 0x4688);
 
-	if (pi->sh->boardflags & BFL_FEM)
-		mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
-	else
-		mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0);
+	mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
 
 	mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
 
@@ -4614,13 +4510,6 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
4614 wlc_lcnphy_rcal(pi); 4510 wlc_lcnphy_rcal(pi);
4615 4511
4616 wlc_lcnphy_rc_cal(pi); 4512 wlc_lcnphy_rc_cal(pi);
4617
4618 if (!(pi->sh->boardflags & BFL_FEM)) {
4619 write_radio_reg(pi, RADIO_2064_REG032, 0x6f);
4620 write_radio_reg(pi, RADIO_2064_REG033, 0x19);
4621 write_radio_reg(pi, RADIO_2064_REG039, 0xe);
4622 }
4623
4624} 4513}
4625 4514
4626static void wlc_lcnphy_radio_init(struct brcms_phy *pi) 4515static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
@@ -4650,20 +4539,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
 		wlc_lcnphy_write_table(pi, &tab);
 	}
 
-	if (!(pi->sh->boardflags & BFL_FEM)) {
-		tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
-		tab.tbl_width = 16;
-		tab.tbl_ptr = &val;
-		tab.tbl_len = 1;
+	tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
+	tab.tbl_width = 16;
+	tab.tbl_ptr = &val;
+	tab.tbl_len = 1;
 
-		val = 150;
-		tab.tbl_offset = 0;
-		wlc_lcnphy_write_table(pi, &tab);
+	val = 114;
+	tab.tbl_offset = 0;
+	wlc_lcnphy_write_table(pi, &tab);
 
-		val = 220;
-		tab.tbl_offset = 1;
-		wlc_lcnphy_write_table(pi, &tab);
-	}
+	val = 130;
+	tab.tbl_offset = 1;
+	wlc_lcnphy_write_table(pi, &tab);
+
+	val = 6;
+	tab.tbl_offset = 8;
+	wlc_lcnphy_write_table(pi, &tab);
 
 	if (CHSPEC_IS2G(pi->radio_chanspec)) {
 		if (pi->sh->boardflags & BFL_FEM)
@@ -5055,7 +4946,6 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
 	wlc_lcnphy_load_tx_iir_filter(pi, true, 3);
 
 	mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
-	wlc_lcnphy_tssi_setup(pi);
 }
 
 void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
@@ -5094,7 +4984,8 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
 	if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
 		return false;
 
-	if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
+	if ((pi->sh->boardflags & BFL_FEM) &&
+	    (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
 		if (pi_lcn->lcnphy_tempsense_option == 3) {
 			pi->hwpwrctrl = true;
 			pi->hwpwrctrl_capable = true;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
index b7e95acc2084..622c01ca72c5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
@@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = {
1992}; 1992};
1993 1993
1994static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { 1994static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
1995 0x0009,
1996 0x000a, 1995 0x000a,
1997 0x0005,
1998 0x0006,
1999 0x0009, 1996 0x0009,
2000 0x000a,
2001 0x0005,
2002 0x0006, 1997 0x0006,
2003 0x0009,
2004 0x000a,
2005 0x0005, 1998 0x0005,
2006 0x0006,
2007 0x0009,
2008 0x000a, 1999 0x000a,
2009 0x0005,
2010 0x0006,
2011 0x0009, 2000 0x0009,
2012 0x000a,
2013 0x0005,
2014 0x0006, 2001 0x0006,
2015 0x0009,
2016 0x000a,
2017 0x0005, 2002 0x0005,
2018 0x0006,
2019 0x0009,
2020 0x000a, 2003 0x000a,
2021 0x0005,
2022 0x0006,
2023 0x0009, 2004 0x0009,
2024 0x000a,
2025 0x0005,
2026 0x0006, 2005 0x0006,
2027 0x0009,
2028 0x000a,
2029 0x0005, 2006 0x0005,
2030 0x0006,
2031 0x0009,
2032 0x000a, 2007 0x000a,
2033 0x0005,
2034 0x0006,
2035 0x0009, 2008 0x0009,
2036 0x000a,
2037 0x0005,
2038 0x0006, 2009 0x0006,
2039 0x0009,
2040 0x000a,
2041 0x0005, 2010 0x0005,
2042 0x0006, 2011 0x000a,
2043 0x0009, 2012 0x0009,
2013 0x0006,
2014 0x0005,
2044 0x000a, 2015 0x000a,
2016 0x0009,
2017 0x0006,
2045 0x0005, 2018 0x0005,
2019 0x000a,
2020 0x0009,
2046 0x0006, 2021 0x0006,
2022 0x0005,
2023 0x000a,
2047 0x0009, 2024 0x0009,
2025 0x0006,
2026 0x0005,
2048 0x000a, 2027 0x000a,
2028 0x0009,
2029 0x0006,
2049 0x0005, 2030 0x0005,
2031 0x000a,
2032 0x0009,
2050 0x0006, 2033 0x0006,
2034 0x0005,
2035 0x000a,
2051 0x0009, 2036 0x0009,
2037 0x0006,
2038 0x0005,
2052 0x000a, 2039 0x000a,
2040 0x0009,
2041 0x0006,
2053 0x0005, 2042 0x0005,
2043 0x000a,
2044 0x0009,
2054 0x0006, 2045 0x0006,
2046 0x0005,
2047 0x000a,
2055 0x0009, 2048 0x0009,
2049 0x0006,
2050 0x0005,
2056 0x000a, 2051 0x000a,
2052 0x0009,
2053 0x0006,
2057 0x0005, 2054 0x0005,
2055 0x000a,
2056 0x0009,
2058 0x0006, 2057 0x0006,
2058 0x0005,
2059}; 2059};
2060 2060
2061static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { 2061static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 3630a41df50d..c353b5f19c8c 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -475,6 +475,7 @@ il3945_tx_skb(struct il_priv *il,
 	dma_addr_t txcmd_phys;
 	int txq_id = skb_get_queue_mapping(skb);
 	u16 len, idx, hdr_len;
+	u16 firstlen, secondlen;
 	u8 id;
 	u8 unicast;
 	u8 sta_id;
@@ -589,21 +590,22 @@ il3945_tx_skb(struct il_priv *il,
 	len =
 	    sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
 	    hdr_len;
-	len = (len + 3) & ~3;
+	firstlen = (len + 3) & ~3;
 
 	/* Physical address of this Tx command's header (not MAC header!),
 	 * within command buffer array. */
 	txcmd_phys =
-	    pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
+	    pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
+			   PCI_DMA_TODEVICE);
 	if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
 		goto drop_unlock;
 
 	/* Set up TFD's 2nd entry to point directly to remainder of skb,
 	 * if any (802.11 null frames have no payload). */
-	len = skb->len - hdr_len;
-	if (len) {
+	secondlen = skb->len - hdr_len;
+	if (secondlen > 0) {
 		phys_addr =
-		    pci_map_single(il->pci_dev, skb->data + hdr_len, len,
+		    pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
 				   PCI_DMA_TODEVICE);
 		if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
 			goto drop_unlock;
@@ -611,12 +613,12 @@ il3945_tx_skb(struct il_priv *il,
 
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
 	 * first entry */
-	il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0);
+	il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
 	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-	dma_unmap_len_set(out_meta, len, len);
-	if (len)
-		il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, len, 0,
-					       U32_PAD(len));
+	dma_unmap_len_set(out_meta, len, firstlen);
+	if (secondlen > 0)
+		il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0,
+					       U32_PAD(secondlen));
 
 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
 		txq->need_update = 1;
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index e8324b5e5bfe..6c7493c2d698 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2152,7 +2152,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
 	int rate_idx;
 	int i;
 	u32 rate;
-	u8 use_green = il4965_rs_use_green(il, sta);
+	u8 use_green;
 	u8 active_tbl = 0;
 	u8 valid_tx_ant;
 	struct il_station_priv *sta_priv;
@@ -2160,6 +2160,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
 	if (!sta || !lq_sta)
 		return;
 
+	use_green = il4965_rs_use_green(il, sta);
 	sta_priv = (void *)sta->drv_priv;
 
 	i = lq_sta->last_txrate_idx;
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 86ea5f4c3939..44ca0e57f9f7 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -1262,6 +1262,15 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	}
 
 	/*
+	 * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag
+	 * in iwl_down but cancel the workers only later.
+	 */
+	if (!priv->ucode_loaded) {
+		IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id);
+		return -EIO;
+	}
+
+	/*
 	 * Synchronous commands from this op-mode must hold
 	 * the mutex, this ensures we don't try to send two
 	 * (or more) synchronous commands at a time.
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 23be948cf162..a82b6b39d4ff 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1419,6 +1419,14 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
 
 	mutex_lock(&priv->mutex);
 
+	if (changes & BSS_CHANGED_IDLE && bss_conf->idle) {
+		/*
+		 * If we go idle, then clearly no "passive-no-rx"
+		 * workaround is needed any more, this is a reset.
+		 */
+		iwlagn_lift_passive_no_rx(priv);
+	}
+
 	if (unlikely(!iwl_is_ready(priv))) {
 		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
 		mutex_unlock(&priv->mutex);
@@ -1450,16 +1458,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
 		priv->timestamp = bss_conf->sync_tsf;
 		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
 	} else {
-		/*
-		 * If we disassociate while there are pending
-		 * frames, just wake up the queues and let the
-		 * frames "escape" ... This shouldn't really
-		 * be happening to start with, but we should
-		 * not get stuck in this case either since it
-		 * can happen if userspace gets confused.
-		 */
-		iwlagn_lift_passive_no_rx(priv);
-
 		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 
 		if (ctx->ctxid == IWL_RXON_CTX_BSS)
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 6aec2df3bb27..d1a670d7b10c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1192,7 +1192,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 		memset(&info->status, 0, sizeof(info->status));
 
 		if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
-		    iwl_is_associated_ctx(ctx) && ctx->vif &&
+		    ctx->vif &&
 		    ctx->vif->type == NL80211_IFTYPE_STATION) {
 			/* block and stop all queues */
 			priv->passive_no_rx = true;
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 736fe9bb140e..1a4ac9236a44 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -367,6 +367,8 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
 		return -EIO;
 	}
 
+	priv->ucode_loaded = true;
+
 	if (ucode_type != IWL_UCODE_WOWLAN) {
 		/* delay a bit to give rfkill time to run */
 		msleep(5);
@@ -380,8 +382,6 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
 		return ret;
 	}
 
-	priv->ucode_loaded = true;
-
 	return 0;
 }
387 387
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 17bedc50e753..12c4f31ca8fb 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -475,6 +475,10 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 
 	/* If platform's RF_KILL switch is NOT set to KILL */
 	hw_rfkill = iwl_is_rfkill_set(trans);
+	if (hw_rfkill)
+		set_bit(STATUS_RFKILL, &trans_pcie->status);
+	else
+		clear_bit(STATUS_RFKILL, &trans_pcie->status);
 	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 	if (hw_rfkill && !run_in_rfkill)
 		return -ERFKILL;
@@ -641,6 +645,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 
 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	bool hw_rfkill;
 	int err;
 
@@ -656,6 +661,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 	iwl_enable_rfkill_int(trans);
 
 	hw_rfkill = iwl_is_rfkill_set(trans);
+	if (hw_rfkill)
+		set_bit(STATUS_RFKILL, &trans_pcie->status);
+	else
+		clear_bit(STATUS_RFKILL, &trans_pcie->status);
 	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 
 	return 0;
@@ -694,6 +703,10 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
 	 * op_mode.
 	 */
 	hw_rfkill = iwl_is_rfkill_set(trans);
+	if (hw_rfkill)
+		set_bit(STATUS_RFKILL, &trans_pcie->status);
+	else
+		clear_bit(STATUS_RFKILL, &trans_pcie->status);
 	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 	}
 }
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8595c16f74de..cb5c6792e3a8 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1264,7 +1264,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 		int copy = 0;
 
-		if (!cmd->len)
+		if (!cmd->len[i])
 			continue;
 
 		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index a44023a7bd57..8aaf56ade4d9 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1892,7 +1892,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
 		}
 	}
 
-	for (i = 0; i < request->n_channels; i++) {
+	for (i = 0; i < min_t(u32, request->n_channels,
+			      MWIFIEX_USER_SCAN_CHAN_MAX); i++) {
 		chan = request->channels[i];
 		priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
 		priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 20a6c5555873..b5c8b962ce12 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -157,6 +157,20 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
 		return -1;
 	}
 
+	cmd_code = le16_to_cpu(host_cmd->command);
+	cmd_size = le16_to_cpu(host_cmd->size);
+
+	if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET &&
+	    cmd_code != HostCmd_CMD_FUNC_SHUTDOWN &&
+	    cmd_code != HostCmd_CMD_FUNC_INIT) {
+		dev_err(adapter->dev,
+			"DNLD_CMD: FW in reset state, ignore cmd %#x\n",
+			cmd_code);
+		mwifiex_complete_cmd(adapter, cmd_node);
+		mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+		return -1;
+	}
+
 	/* Set command sequence number */
 	adapter->seq_num++;
 	host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO
@@ -168,9 +182,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
 	adapter->curr_cmd = cmd_node;
 	spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
-	cmd_code = le16_to_cpu(host_cmd->command);
-	cmd_size = le16_to_cpu(host_cmd->size);
-
 	/* Adjust skb length */
 	if (cmd_node->cmd_skb->len > cmd_size)
 		/*
@@ -484,8 +495,6 @@ int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
 
 	ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid,
 				     data_buf);
-	if (!ret)
-		ret = mwifiex_wait_queue_complete(adapter);
 
 	return ret;
 }
@@ -588,9 +597,10 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
 	if (cmd_no == HostCmd_CMD_802_11_SCAN) {
 		mwifiex_queue_scan_cmd(priv, cmd_node);
 	} else {
-		adapter->cmd_queued = cmd_node;
 		mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
 		queue_work(adapter->workqueue, &adapter->main_work);
+		if (cmd_node->wait_q_enabled)
+			ret = mwifiex_wait_queue_complete(adapter, cmd_node);
 	}
 
 	return ret;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index e38aa9b3663d..0ff4c37ab42a 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -709,6 +709,14 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
 		return ret;
 	}
 
+	/* cancel current command */
+	if (adapter->curr_cmd) {
+		dev_warn(adapter->dev, "curr_cmd is still in processing\n");
+		del_timer(&adapter->cmd_timer);
+		mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
+		adapter->curr_cmd = NULL;
+	}
+
 	/* shut down mwifiex */
 	dev_dbg(adapter->dev, "info: shutdown mwifiex...\n");
 
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 553adfb0aa81..7035ade9af74 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -723,7 +723,6 @@ struct mwifiex_adapter {
 	u16 cmd_wait_q_required;
 	struct mwifiex_wait_queue cmd_wait_q;
 	u8 scan_wait_q_woken;
-	struct cmd_ctrl_node *cmd_queued;
 	spinlock_t queue_lock;		/* lock for tx queues */
 	struct completion fw_load;
 	u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
@@ -1018,7 +1017,8 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
 				struct mwifiex_multicast_list *mcast_list);
 int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
 			    struct net_device *dev);
-int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter);
+int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
+				struct cmd_ctrl_node *cmd_queued);
 int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
 		      struct cfg80211_ssid *req_ssid);
 int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 5c395e2e6a2b..feb204613397 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -1508,6 +1508,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
 	}
 	memcpy(adapter->upld_buf, skb->data,
 	       min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
+	skb_push(skb, INTF_HEADER_LEN);
 	if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
 				   PCI_DMA_FROMDEVICE))
 		return -1;
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index bb60c2754a97..e7f6deaf715e 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1388,10 +1388,15 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
 			list_del(&cmd_node->list);
 			spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
 					       flags);
-			adapter->cmd_queued = cmd_node;
 			mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
 							true);
 			queue_work(adapter->workqueue, &adapter->main_work);
+
+			/* Perform internal scan synchronously */
+			if (!priv->scan_request) {
+				dev_dbg(adapter->dev, "wait internal scan\n");
+				mwifiex_wait_queue_complete(adapter, cmd_node);
+			}
 		} else {
 			spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
 					       flags);
@@ -1790,7 +1795,12 @@ check_next_scan:
 	/* Need to indicate IOCTL complete */
 	if (adapter->curr_cmd->wait_q_enabled) {
 		adapter->cmd_wait_q.status = 0;
-		mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+		if (!priv->scan_request) {
+			dev_dbg(adapter->dev,
+				"complete internal scan\n");
+			mwifiex_complete_cmd(adapter,
+					     adapter->curr_cmd);
+		}
 	}
 	if (priv->report_scan_result)
 		priv->report_scan_result = false;
@@ -1946,9 +1956,6 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
 	/* Normal scan */
 	ret = mwifiex_scan_networks(priv, NULL);
 
-	if (!ret)
-		ret = mwifiex_wait_queue_complete(priv->adapter);
-
 	up(&priv->async_sem);
 
 	return ret;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 9f33c92c90f5..13100f8de3db 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -54,16 +54,10 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
  * This function waits on a cmd wait queue. It also cancels the pending
  * request after waking up, in case of errors.
  */
-int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
+int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
+				struct cmd_ctrl_node *cmd_queued)
 {
 	int status;
-	struct cmd_ctrl_node *cmd_queued;
-
-	if (!adapter->cmd_queued)
-		return 0;
-
-	cmd_queued = adapter->cmd_queued;
-	adapter->cmd_queued = NULL;
 
 	dev_dbg(adapter->dev, "cmd pending\n");
 	atomic_inc(&adapter->cmd_pending);
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 2bf4efa33186..76cd47eb901e 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -20,6 +20,7 @@ if RT2X00
 config RT2400PCI
 	tristate "Ralink rt2400 (PCI/PCMCIA) support"
 	depends on PCI
+	select RT2X00_LIB_MMIO
 	select RT2X00_LIB_PCI
 	select EEPROM_93CX6
 	---help---
@@ -31,6 +32,7 @@ config RT2400PCI
 config RT2500PCI
 	tristate "Ralink rt2500 (PCI/PCMCIA) support"
 	depends on PCI
+	select RT2X00_LIB_MMIO
 	select RT2X00_LIB_PCI
 	select EEPROM_93CX6
 	---help---
@@ -43,6 +45,7 @@ config RT61PCI
 	tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
 	depends on PCI
 	select RT2X00_LIB_PCI
+	select RT2X00_LIB_MMIO
 	select RT2X00_LIB_FIRMWARE
 	select RT2X00_LIB_CRYPTO
 	select CRC_ITU_T
@@ -57,6 +60,7 @@ config RT2800PCI
 	tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
 	depends on PCI || SOC_RT288X || SOC_RT305X
 	select RT2800_LIB
+	select RT2X00_LIB_MMIO
 	select RT2X00_LIB_PCI if PCI
 	select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
 	select RT2X00_LIB_FIRMWARE
@@ -185,6 +189,9 @@ endif
 config RT2800_LIB
 	tristate
 
+config RT2X00_LIB_MMIO
+	tristate
+
 config RT2X00_LIB_PCI
 	tristate
 	select RT2X00_LIB
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index 349d5b8284a4..f069d8bc5b67 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -9,6 +9,7 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o
9rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o 9rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o
10 10
11obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o 11obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
12obj-$(CONFIG_RT2X00_LIB_MMIO) += rt2x00mmio.o
12obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o 13obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
13obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o 14obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
14obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o 15obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 221beaaa83f1..dcfb54e0c516 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35 35
36#include "rt2x00.h" 36#include "rt2x00.h"
37#include "rt2x00mmio.h"
37#include "rt2x00pci.h" 38#include "rt2x00pci.h"
38#include "rt2400pci.h" 39#include "rt2400pci.h"
39 40
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 39edc59e8d03..e1d2dc9ed28a 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35 35
36#include "rt2x00.h" 36#include "rt2x00.h"
37#include "rt2x00mmio.h"
37#include "rt2x00pci.h" 38#include "rt2x00pci.h"
38#include "rt2500pci.h" 39#include "rt2500pci.h"
39 40
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index ded73da4de0b..ba5a05625aaa 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -41,6 +41,7 @@
41#include <linux/eeprom_93cx6.h> 41#include <linux/eeprom_93cx6.h>
42 42
43#include "rt2x00.h" 43#include "rt2x00.h"
44#include "rt2x00mmio.h"
44#include "rt2x00pci.h" 45#include "rt2x00pci.h"
45#include "rt2x00soc.h" 46#include "rt2x00soc.h"
46#include "rt2800lib.h" 47#include "rt2800lib.h"
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.c b/drivers/net/wireless/rt2x00/rt2x00mmio.c
new file mode 100644
index 000000000000..d84a680ba0c9
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00mmio.c
@@ -0,0 +1,216 @@
1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00mmio
23 Abstract: rt2x00 generic mmio device routines.
24 */
25
26#include <linux/dma-mapping.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/slab.h>
30
31#include "rt2x00.h"
32#include "rt2x00mmio.h"
33
34/*
35 * Register access.
36 */
37int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
38 const unsigned int offset,
39 const struct rt2x00_field32 field,
40 u32 *reg)
41{
42 unsigned int i;
43
44 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
45 return 0;
46
47 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
48 rt2x00pci_register_read(rt2x00dev, offset, reg);
49 if (!rt2x00_get_field32(*reg, field))
50 return 1;
51 udelay(REGISTER_BUSY_DELAY);
52 }
53
54 printk_once(KERN_ERR "%s() Indirect register access failed: "
55 "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg);
56 *reg = ~0;
57
58 return 0;
59}
60EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
61
62bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
63{
64 struct data_queue *queue = rt2x00dev->rx;
65 struct queue_entry *entry;
66 struct queue_entry_priv_pci *entry_priv;
67 struct skb_frame_desc *skbdesc;
68 int max_rx = 16;
69
70 while (--max_rx) {
71 entry = rt2x00queue_get_entry(queue, Q_INDEX);
72 entry_priv = entry->priv_data;
73
74 if (rt2x00dev->ops->lib->get_entry_state(entry))
75 break;
76
77 /*
78 * Fill in desc fields of the skb descriptor
79 */
80 skbdesc = get_skb_frame_desc(entry->skb);
81 skbdesc->desc = entry_priv->desc;
82 skbdesc->desc_len = entry->queue->desc_size;
83
84 /*
85 * DMA is already done, notify rt2x00lib that
86 * it finished successfully.
87 */
88 rt2x00lib_dmastart(entry);
89 rt2x00lib_dmadone(entry);
90
91 /*
92 * Send the frame to rt2x00lib for further processing.
93 */
94 rt2x00lib_rxdone(entry, GFP_ATOMIC);
95 }
96
97 return !max_rx;
98}
99EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
100
101void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
102{
103 unsigned int i;
104
105 for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
106 msleep(10);
107}
108EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
109
110/*
111 * Device initialization handlers.
112 */
113static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
114 struct data_queue *queue)
115{
116 struct queue_entry_priv_pci *entry_priv;
117 void *addr;
118 dma_addr_t dma;
119 unsigned int i;
120
121 /*
122 * Allocate DMA memory for descriptor and buffer.
123 */
124 addr = dma_alloc_coherent(rt2x00dev->dev,
125 queue->limit * queue->desc_size,
126 &dma, GFP_KERNEL);
127 if (!addr)
128 return -ENOMEM;
129
130 memset(addr, 0, queue->limit * queue->desc_size);
131
132 /*
133 * Initialize all queue entries to contain valid addresses.
134 */
135 for (i = 0; i < queue->limit; i++) {
136 entry_priv = queue->entries[i].priv_data;
137 entry_priv->desc = addr + i * queue->desc_size;
138 entry_priv->desc_dma = dma + i * queue->desc_size;
139 }
140
141 return 0;
142}
143
144static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
145 struct data_queue *queue)
146{
147 struct queue_entry_priv_pci *entry_priv =
148 queue->entries[0].priv_data;
149
150 if (entry_priv->desc)
151 dma_free_coherent(rt2x00dev->dev,
152 queue->limit * queue->desc_size,
153 entry_priv->desc, entry_priv->desc_dma);
154 entry_priv->desc = NULL;
155}
156
157int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
158{
159 struct data_queue *queue;
160 int status;
161
162 /*
163 * Allocate DMA
164 */
165 queue_for_each(rt2x00dev, queue) {
166 status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
167 if (status)
168 goto exit;
169 }
170
171 /*
172 * Register interrupt handler.
173 */
174 status = request_irq(rt2x00dev->irq,
175 rt2x00dev->ops->lib->irq_handler,
176 IRQF_SHARED, rt2x00dev->name, rt2x00dev);
177 if (status) {
178 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
179 rt2x00dev->irq, status);
180 goto exit;
181 }
182
183 return 0;
184
185exit:
186 queue_for_each(rt2x00dev, queue)
187 rt2x00pci_free_queue_dma(rt2x00dev, queue);
188
189 return status;
190}
191EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
192
193void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
194{
195 struct data_queue *queue;
196
197 /*
198 * Free irq line.
199 */
200 free_irq(rt2x00dev->irq, rt2x00dev);
201
202 /*
203 * Free DMA
204 */
205 queue_for_each(rt2x00dev, queue)
206 rt2x00pci_free_queue_dma(rt2x00dev, queue);
207}
208EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
209
210/*
211 * rt2x00mmio module information.
212 */
213MODULE_AUTHOR(DRV_PROJECT);
214MODULE_VERSION(DRV_VERSION);
215MODULE_DESCRIPTION("rt2x00 mmio library");
216MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.h b/drivers/net/wireless/rt2x00/rt2x00mmio.h
new file mode 100644
index 000000000000..4ecaf60175bf
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00mmio.h
@@ -0,0 +1,119 @@
1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00mmio
23 Abstract: Data structures for the rt2x00mmio module.
24 */
25
26#ifndef RT2X00MMIO_H
27#define RT2X00MMIO_H
28
29#include <linux/io.h>
30
31/*
32 * Register access.
33 */
34static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
35 const unsigned int offset,
36 u32 *value)
37{
38 *value = readl(rt2x00dev->csr.base + offset);
39}
40
41static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
42 const unsigned int offset,
43 void *value, const u32 length)
44{
45 memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
46}
47
48static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
49 const unsigned int offset,
50 u32 value)
51{
52 writel(value, rt2x00dev->csr.base + offset);
53}
54
55static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
56 const unsigned int offset,
57 const void *value,
58 const u32 length)
59{
60 __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
61}
62
63/**
64 * rt2x00pci_regbusy_read - Read from register with busy check
65 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
66 * @offset: Register offset
67 * @field: Field to check if register is busy
68 * @reg: Pointer to where register contents should be stored
69 *
70 * This function will read the given register, and checks if the
71 * register is busy. If it is, it will sleep for a couple of
72 * microseconds before reading the register again. If the register
73 * is not read after a certain timeout, this function will return
74 * FALSE.
75 */
76int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
77 const unsigned int offset,
78 const struct rt2x00_field32 field,
79 u32 *reg);
80
81/**
82 * struct queue_entry_priv_pci: Per entry PCI specific information
83 *
84 * @desc: Pointer to device descriptor
85 * @desc_dma: DMA pointer to &desc.
86 * @data: Pointer to device's entry memory.
87 * @data_dma: DMA pointer to &data.
88 */
89struct queue_entry_priv_pci {
90 __le32 *desc;
91 dma_addr_t desc_dma;
92};
93
94/**
95 * rt2x00pci_rxdone - Handle RX done events
96 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
97 *
98 * Returns true if there are still rx frames pending and false if all
99 * pending rx frames were processed.
100 */
101bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
102
103/**
104 * rt2x00pci_flush_queue - Flush data queue
105 * @queue: Data queue to stop
106 * @drop: True to drop all pending frames.
107 *
108 * This will wait for a maximum of 100ms, waiting for the queues
109 * to become empty.
110 */
111void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
112
113/*
114 * Device initialization handlers.
115 */
116int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
117void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
118
119#endif /* RT2X00MMIO_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index a0c8caef3b0a..e87865e33113 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -33,182 +33,6 @@
33#include "rt2x00pci.h" 33#include "rt2x00pci.h"
34 34
35/* 35/*
36 * Register access.
37 */
38int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
39 const unsigned int offset,
40 const struct rt2x00_field32 field,
41 u32 *reg)
42{
43 unsigned int i;
44
45 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
46 return 0;
47
48 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
49 rt2x00pci_register_read(rt2x00dev, offset, reg);
50 if (!rt2x00_get_field32(*reg, field))
51 return 1;
52 udelay(REGISTER_BUSY_DELAY);
53 }
54
55 ERROR(rt2x00dev, "Indirect register access failed: "
56 "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
57 *reg = ~0;
58
59 return 0;
60}
61EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
62
63bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
64{
65 struct data_queue *queue = rt2x00dev->rx;
66 struct queue_entry *entry;
67 struct queue_entry_priv_pci *entry_priv;
68 struct skb_frame_desc *skbdesc;
69 int max_rx = 16;
70
71 while (--max_rx) {
72 entry = rt2x00queue_get_entry(queue, Q_INDEX);
73 entry_priv = entry->priv_data;
74
75 if (rt2x00dev->ops->lib->get_entry_state(entry))
76 break;
77
78 /*
79 * Fill in desc fields of the skb descriptor
80 */
81 skbdesc = get_skb_frame_desc(entry->skb);
82 skbdesc->desc = entry_priv->desc;
83 skbdesc->desc_len = entry->queue->desc_size;
84
85 /*
86 * DMA is already done, notify rt2x00lib that
87 * it finished successfully.
88 */
89 rt2x00lib_dmastart(entry);
90 rt2x00lib_dmadone(entry);
91
92 /*
93 * Send the frame to rt2x00lib for further processing.
94 */
95 rt2x00lib_rxdone(entry, GFP_ATOMIC);
96 }
97
98 return !max_rx;
99}
100EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
101
102void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
103{
104 unsigned int i;
105
106 for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
107 msleep(10);
108}
109EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
110
111/*
112 * Device initialization handlers.
113 */
114static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
115 struct data_queue *queue)
116{
117 struct queue_entry_priv_pci *entry_priv;
118 void *addr;
119 dma_addr_t dma;
120 unsigned int i;
121
122 /*
123 * Allocate DMA memory for descriptor and buffer.
124 */
125 addr = dma_alloc_coherent(rt2x00dev->dev,
126 queue->limit * queue->desc_size,
127 &dma, GFP_KERNEL);
128 if (!addr)
129 return -ENOMEM;
130
131 memset(addr, 0, queue->limit * queue->desc_size);
132
133 /*
134 * Initialize all queue entries to contain valid addresses.
135 */
136 for (i = 0; i < queue->limit; i++) {
137 entry_priv = queue->entries[i].priv_data;
138 entry_priv->desc = addr + i * queue->desc_size;
139 entry_priv->desc_dma = dma + i * queue->desc_size;
140 }
141
142 return 0;
143}
144
145static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
146 struct data_queue *queue)
147{
148 struct queue_entry_priv_pci *entry_priv =
149 queue->entries[0].priv_data;
150
151 if (entry_priv->desc)
152 dma_free_coherent(rt2x00dev->dev,
153 queue->limit * queue->desc_size,
154 entry_priv->desc, entry_priv->desc_dma);
155 entry_priv->desc = NULL;
156}
157
158int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
159{
160 struct data_queue *queue;
161 int status;
162
163 /*
164 * Allocate DMA
165 */
166 queue_for_each(rt2x00dev, queue) {
167 status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
168 if (status)
169 goto exit;
170 }
171
172 /*
173 * Register interrupt handler.
174 */
175 status = request_irq(rt2x00dev->irq,
176 rt2x00dev->ops->lib->irq_handler,
177 IRQF_SHARED, rt2x00dev->name, rt2x00dev);
178 if (status) {
179 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
180 rt2x00dev->irq, status);
181 goto exit;
182 }
183
184 return 0;
185
186exit:
187 queue_for_each(rt2x00dev, queue)
188 rt2x00pci_free_queue_dma(rt2x00dev, queue);
189
190 return status;
191}
192EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
193
194void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
195{
196 struct data_queue *queue;
197
198 /*
199 * Free irq line.
200 */
201 free_irq(rt2x00dev->irq, rt2x00dev);
202
203 /*
204 * Free DMA
205 */
206 queue_for_each(rt2x00dev, queue)
207 rt2x00pci_free_queue_dma(rt2x00dev, queue);
208}
209EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
210
211/*
212 * PCI driver handlers. 36 * PCI driver handlers.
213 */ 37 */
214static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) 38static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index e2c99f2b9a14..60d90b20f8b9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -36,94 +36,6 @@
36#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) 36#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops)
37 37
38/* 38/*
39 * Register access.
40 */
41static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
42 const unsigned int offset,
43 u32 *value)
44{
45 *value = readl(rt2x00dev->csr.base + offset);
46}
47
48static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
49 const unsigned int offset,
50 void *value, const u32 length)
51{
52 memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
53}
54
55static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
56 const unsigned int offset,
57 u32 value)
58{
59 writel(value, rt2x00dev->csr.base + offset);
60}
61
62static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
63 const unsigned int offset,
64 const void *value,
65 const u32 length)
66{
67 __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
68}
69
70/**
71 * rt2x00pci_regbusy_read - Read from register with busy check
72 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
73 * @offset: Register offset
74 * @field: Field to check if register is busy
75 * @reg: Pointer to where register contents should be stored
76 *
77 * This function will read the given register, and checks if the
78 * register is busy. If it is, it will sleep for a couple of
79 * microseconds before reading the register again. If the register
80 * is not read after a certain timeout, this function will return
81 * FALSE.
82 */
83int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
84 const unsigned int offset,
85 const struct rt2x00_field32 field,
86 u32 *reg);
87
88/**
89 * struct queue_entry_priv_pci: Per entry PCI specific information
90 *
91 * @desc: Pointer to device descriptor
92 * @desc_dma: DMA pointer to &desc.
93 * @data: Pointer to device's entry memory.
94 * @data_dma: DMA pointer to &data.
95 */
96struct queue_entry_priv_pci {
97 __le32 *desc;
98 dma_addr_t desc_dma;
99};
100
101/**
102 * rt2x00pci_rxdone - Handle RX done events
103 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
104 *
105 * Returns true if there are still rx frames pending and false if all
106 * pending rx frames were processed.
107 */
108bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
109
110/**
111 * rt2x00pci_flush_queue - Flush data queue
112 * @queue: Data queue to stop
113 * @drop: True to drop all pending frames.
114 *
115 * This will wait for a maximum of 100ms, waiting for the queues
116 * to become empty.
117 */
118void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
119
120/*
121 * Device initialization handlers.
122 */
123int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
124void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
125
126/*
127 * PCI driver handlers. 39 * PCI driver handlers.
128 */ 40 */
129int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops); 41int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index f95792cfcf89..9e3c8ff53e3f 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -35,6 +35,7 @@
35#include <linux/eeprom_93cx6.h> 35#include <linux/eeprom_93cx6.h>
36 36
37#include "rt2x00.h" 37#include "rt2x00.h"
38#include "rt2x00mmio.h"
38#include "rt2x00pci.h" 39#include "rt2x00pci.h"
39#include "rt61pci.h" 40#include "rt61pci.h"
40 41
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 156b52732f3d..5847d6d0881e 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -851,6 +851,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
851 if (unlikely(!_urb)) { 851 if (unlikely(!_urb)) {
852 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 852 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
853 "Can't allocate urb. Drop skb!\n"); 853 "Can't allocate urb. Drop skb!\n");
854 kfree_skb(skb);
854 return; 855 return;
855 } 856 }
856 _rtl_submit_tx_urb(hw, _urb); 857 _rtl_submit_tx_urb(hw, _urb);
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index eef38cfd812e..ca33ae193935 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -22,7 +22,7 @@
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/gpio.h> 24#include <linux/gpio.h>
25#include <linux/mei_bus.h> 25#include <linux/mei_cl_bus.h>
26 26
27#include <linux/nfc.h> 27#include <linux/nfc.h>
28#include <net/nfc/hci.h> 28#include <net/nfc/hci.h>
@@ -32,9 +32,6 @@
32 32
33#define MICROREAD_DRIVER_NAME "microread" 33#define MICROREAD_DRIVER_NAME "microread"
34 34
35#define MICROREAD_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, 0x94, \
36 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
37
38struct mei_nfc_hdr { 35struct mei_nfc_hdr {
39 u8 cmd; 36 u8 cmd;
40 u8 status; 37 u8 status;
@@ -48,7 +45,7 @@ struct mei_nfc_hdr {
48#define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) 45#define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD)
49 46
50struct microread_mei_phy { 47struct microread_mei_phy {
51 struct mei_device *mei_device; 48 struct mei_cl_device *device;
52 struct nfc_hci_dev *hdev; 49 struct nfc_hci_dev *hdev;
53 50
54 int powered; 51 int powered;
@@ -105,14 +102,14 @@ static int microread_mei_write(void *phy_id, struct sk_buff *skb)
105 102
106 MEI_DUMP_SKB_OUT("mei frame sent", skb); 103 MEI_DUMP_SKB_OUT("mei frame sent", skb);
107 104
108 r = mei_send(phy->device, skb->data, skb->len); 105 r = mei_cl_send(phy->device, skb->data, skb->len);
109 if (r > 0) 106 if (r > 0)
110 r = 0; 107 r = 0;
111 108
112 return r; 109 return r;
113} 110}
114 111
115static void microread_event_cb(struct mei_device *device, u32 events, 112static void microread_event_cb(struct mei_cl_device *device, u32 events,
116 void *context) 113 void *context)
117{ 114{
118 struct microread_mei_phy *phy = context; 115 struct microread_mei_phy *phy = context;
@@ -120,7 +117,7 @@ static void microread_event_cb(struct mei_device *device, u32 events,
120 if (phy->hard_fault != 0) 117 if (phy->hard_fault != 0)
121 return; 118 return;
122 119
123 if (events & BIT(MEI_EVENT_RX)) { 120 if (events & BIT(MEI_CL_EVENT_RX)) {
124 struct sk_buff *skb; 121 struct sk_buff *skb;
125 int reply_size; 122 int reply_size;
126 123
@@ -128,7 +125,7 @@ static void microread_event_cb(struct mei_device *device, u32 events,
128 if (!skb) 125 if (!skb)
129 return; 126 return;
130 127
131 reply_size = mei_recv(device, skb->data, MEI_NFC_MAX_READ); 128 reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ);
132 if (reply_size < MEI_NFC_HEADER_SIZE) { 129 if (reply_size < MEI_NFC_HEADER_SIZE) {
133 kfree(skb); 130 kfree(skb);
134 return; 131 return;
@@ -149,8 +146,8 @@ static struct nfc_phy_ops mei_phy_ops = {
149 .disable = microread_mei_disable, 146 .disable = microread_mei_disable,
150}; 147};
151 148
152static int microread_mei_probe(struct mei_device *device, 149static int microread_mei_probe(struct mei_cl_device *device,
153 const struct mei_id *id) 150 const struct mei_cl_device_id *id)
154{ 151{
155 struct microread_mei_phy *phy; 152 struct microread_mei_phy *phy;
156 int r; 153 int r;
@@ -164,9 +161,9 @@ static int microread_mei_probe(struct mei_device *device,
164 } 161 }
165 162
166 phy->device = device; 163 phy->device = device;
167 mei_set_clientdata(device, phy); 164 mei_cl_set_drvdata(device, phy);
168 165
169 r = mei_register_event_cb(device, microread_event_cb, phy); 166 r = mei_cl_register_event_cb(device, microread_event_cb, phy);
170 if (r) { 167 if (r) {
171 pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n"); 168 pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n");
172 goto err_out; 169 goto err_out;
@@ -186,9 +183,9 @@ err_out:
186 return r; 183 return r;
187} 184}
188 185
189static int microread_mei_remove(struct mei_device *device) 186static int microread_mei_remove(struct mei_cl_device *device)
190{ 187{
191 struct microread_mei_phy *phy = mei_get_clientdata(device); 188 struct microread_mei_phy *phy = mei_cl_get_drvdata(device);
192 189
193 pr_info("Removing microread\n"); 190 pr_info("Removing microread\n");
194 191
@@ -202,16 +199,15 @@ static int microread_mei_remove(struct mei_device *device)
202 return 0; 199 return 0;
203} 200}
204 201
205static struct mei_id microread_mei_tbl[] = { 202static struct mei_cl_device_id microread_mei_tbl[] = {
206 { MICROREAD_DRIVER_NAME, MICROREAD_UUID }, 203 { MICROREAD_DRIVER_NAME },
207 204
208 /* required last entry */ 205 /* required last entry */
209 { } 206 { }
210}; 207};
211
212MODULE_DEVICE_TABLE(mei, microread_mei_tbl); 208MODULE_DEVICE_TABLE(mei, microread_mei_tbl);
213 209
214static struct mei_driver microread_driver = { 210static struct mei_cl_driver microread_driver = {
215 .id_table = microread_mei_tbl, 211 .id_table = microread_mei_tbl,
216 .name = MICROREAD_DRIVER_NAME, 212 .name = MICROREAD_DRIVER_NAME,
217 213
@@ -225,7 +221,7 @@ static int microread_mei_init(void)
225 221
226 pr_debug(DRIVER_DESC ": %s\n", __func__); 222 pr_debug(DRIVER_DESC ": %s\n", __func__);
227 223
228 r = mei_driver_register(&microread_driver); 224 r = mei_cl_driver_register(&microread_driver);
229 if (r) { 225 if (r) {
230 pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n"); 226 pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n");
231 return r; 227 return r;
@@ -236,7 +232,7 @@ static int microread_mei_init(void)
236 232
237static void microread_mei_exit(void) 233static void microread_mei_exit(void)
238{ 234{
239 mei_driver_unregister(&microread_driver); 235 mei_cl_driver_unregister(&microread_driver);
240} 236}
241 237
242module_init(microread_mei_init); 238module_init(microread_mei_init);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index dee5dddaa292..5147c210df52 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -53,14 +53,15 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
53 return; 53 return;
54 } 54 }
55 55
56 if (!pci_dev->pm_cap || !pci_dev->pme_support 56 /* Clear PME Status if set. */
57 || pci_check_pme_status(pci_dev)) { 57 if (pci_dev->pme_support)
58 if (pci_dev->pme_poll) 58 pci_check_pme_status(pci_dev);
59 pci_dev->pme_poll = false;
60 59
61 pci_wakeup_event(pci_dev); 60 if (pci_dev->pme_poll)
62 pm_runtime_resume(&pci_dev->dev); 61 pci_dev->pme_poll = false;
63 } 62
63 pci_wakeup_event(pci_dev);
64 pm_runtime_resume(&pci_dev->dev);
64 65
65 if (pci_dev->subordinate) 66 if (pci_dev->subordinate)
66 pci_pme_wakeup_bus(pci_dev->subordinate); 67 pci_pme_wakeup_bus(pci_dev->subordinate);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 1fa1e482a999..79277fb36c6b 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -390,9 +390,10 @@ static void pci_device_shutdown(struct device *dev)
390 390
391 /* 391 /*
392 * Turn off Bus Master bit on the device to tell it to not 392 * Turn off Bus Master bit on the device to tell it to not
393 * continue to do DMA 393 * continue to do DMA. Don't touch devices in D3cold or unknown states.
394 */ 394 */
395 pci_clear_master(pci_dev); 395 if (pci_dev->current_state <= PCI_D3hot)
396 pci_clear_master(pci_dev);
396} 397}
397 398
398#ifdef CONFIG_PM 399#ifdef CONFIG_PM
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 08c243ab034e..ed4d09498337 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -185,14 +185,6 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
185#endif /* !PM */ 185#endif /* !PM */
186 186
187/* 187/*
188 * PCIe port runtime suspend is broken for some chipsets, so use a
189 * black list to disable runtime PM for these chipsets.
190 */
191static const struct pci_device_id port_runtime_pm_black_list[] = {
192 { /* end: all zeroes */ }
193};
194
195/*
196 * pcie_portdrv_probe - Probe PCI-Express port devices 188 * pcie_portdrv_probe - Probe PCI-Express port devices
197 * @dev: PCI-Express port device being probed 189 * @dev: PCI-Express port device being probed
198 * 190 *
@@ -225,16 +217,11 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
225 * it by default. 217 * it by default.
226 */ 218 */
227 dev->d3cold_allowed = false; 219 dev->d3cold_allowed = false;
228 if (!pci_match_id(port_runtime_pm_black_list, dev))
229 pm_runtime_put_noidle(&dev->dev);
230
231 return 0; 220 return 0;
232} 221}
233 222
234static void pcie_portdrv_remove(struct pci_dev *dev) 223static void pcie_portdrv_remove(struct pci_dev *dev)
235{ 224{
236 if (!pci_match_id(port_runtime_pm_black_list, dev))
237 pm_runtime_get_noresume(&dev->dev);
238 pcie_port_device_remove(dev); 225 pcie_port_device_remove(dev);
239 pci_disable_device(dev); 226 pci_disable_device(dev);
240} 227}
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index b41ac7756a4b..c5d0a08a8747 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -100,27 +100,6 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
100 return min((size_t)(image - rom), size); 100 return min((size_t)(image - rom), size);
101} 101}
102 102
103static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size)
104{
105 struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
106 loff_t start;
107
108 /* assign the ROM an address if it doesn't have one */
109 if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE))
110 return 0;
111 start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
112 *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
113
114 if (*size == 0)
115 return 0;
116
117 /* Enable ROM space decodes */
118 if (pci_enable_rom(pdev))
119 return 0;
120
121 return start;
122}
123
124/** 103/**
125 * pci_map_rom - map a PCI ROM to kernel space 104 * pci_map_rom - map a PCI ROM to kernel space
126 * @pdev: pointer to pci device struct 105 * @pdev: pointer to pci device struct
@@ -135,7 +114,7 @@ static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size)
135void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) 114void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
136{ 115{
137 struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; 116 struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
138 loff_t start = 0; 117 loff_t start;
139 void __iomem *rom; 118 void __iomem *rom;
140 119
141 /* 120 /*
@@ -154,21 +133,21 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
154 return (void __iomem *)(unsigned long) 133 return (void __iomem *)(unsigned long)
155 pci_resource_start(pdev, PCI_ROM_RESOURCE); 134 pci_resource_start(pdev, PCI_ROM_RESOURCE);
156 } else { 135 } else {
157 start = pci_find_rom(pdev, size); 136 /* assign the ROM an address if it doesn't have one */
158 } 137 if (res->parent == NULL &&
159 } 138 pci_assign_resource(pdev,PCI_ROM_RESOURCE))
139 return NULL;
140 start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
141 *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
142 if (*size == 0)
143 return NULL;
160 144
161 /* 145 /* Enable ROM space decodes */
162 * Some devices may provide ROMs via a source other than the BAR 146 if (pci_enable_rom(pdev))
163 */ 147 return NULL;
164 if (!start && pdev->rom && pdev->romlen) { 148 }
165 *size = pdev->romlen;
166 return phys_to_virt(pdev->rom);
167 } 149 }
168 150
169 if (!start)
170 return NULL;
171
172 rom = ioremap(start, *size); 151 rom = ioremap(start, *size);
173 if (!rom) { 152 if (!rom) {
174 /* restore enable if ioremap fails */ 153 /* restore enable if ioremap fails */
@@ -202,8 +181,7 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
202 if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) 181 if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY))
203 return; 182 return;
204 183
205 if (!pdev->rom || !pdev->romlen) 184 iounmap(rom);
206 iounmap(rom);
207 185
208 /* Disable again before continuing, leave enabled if pci=rom */ 186 /* Disable again before continuing, leave enabled if pci=rom */
209 if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) 187 if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW)))
@@ -227,7 +205,24 @@ void pci_cleanup_rom(struct pci_dev *pdev)
227 } 205 }
228} 206}
229 207
208/**
209 * pci_platform_rom - provides a pointer to any ROM image provided by the
210 * platform
211 * @pdev: pointer to pci device struct
212 * @size: pointer to receive size of pci window over ROM
213 */
214void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size)
215{
216 if (pdev->rom && pdev->romlen) {
217 *size = pdev->romlen;
218 return phys_to_virt((phys_addr_t)pdev->rom);
219 }
220
221 return NULL;
222}
223
230EXPORT_SYMBOL(pci_map_rom); 224EXPORT_SYMBOL(pci_map_rom);
231EXPORT_SYMBOL(pci_unmap_rom); 225EXPORT_SYMBOL(pci_unmap_rom);
232EXPORT_SYMBOL_GPL(pci_enable_rom); 226EXPORT_SYMBOL_GPL(pci_enable_rom);
233EXPORT_SYMBOL_GPL(pci_disable_rom); 227EXPORT_SYMBOL_GPL(pci_disable_rom);
228EXPORT_SYMBOL(pci_platform_rom);
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index c689c04a4f52..2d2f0a43d36b 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -620,7 +620,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
620 620
621 /* special soc specific control */ 621 /* special soc specific control */
622 if (ctrl->mpp_get || ctrl->mpp_set) { 622 if (ctrl->mpp_get || ctrl->mpp_set) {
623 if (!ctrl->name || !ctrl->mpp_set || !ctrl->mpp_set) { 623 if (!ctrl->name || !ctrl->mpp_get || !ctrl->mpp_set) {
624 dev_err(&pdev->dev, "wrong soc control info\n"); 624 dev_err(&pdev->dev, "wrong soc control info\n");
625 return -EINVAL; 625 return -EINVAL;
626 } 626 }
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index ac8d382a79bb..d611ecfcbf70 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -622,7 +622,7 @@ static const struct file_operations pinconf_dbg_pinname_fops = {
622static int pinconf_dbg_state_print(struct seq_file *s, void *d) 622static int pinconf_dbg_state_print(struct seq_file *s, void *d)
623{ 623{
624 if (strlen(dbg_state_name)) 624 if (strlen(dbg_state_name))
625 seq_printf(s, "%s\n", dbg_pinname); 625 seq_printf(s, "%s\n", dbg_state_name);
626 else 626 else
627 seq_printf(s, "No pin state set\n"); 627 seq_printf(s, "No pin state set\n");
628 return 0; 628 return 0;
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
index e3ed8cb072a5..bfda73d64eed 100644
--- a/drivers/pinctrl/pinconf.h
+++ b/drivers/pinctrl/pinconf.h
@@ -90,7 +90,7 @@ static inline void pinconf_init_device_debugfs(struct dentry *devroot,
90 * pin config. 90 * pin config.
91 */ 91 */
92 92
93#ifdef CONFIG_GENERIC_PINCONF 93#if defined(CONFIG_GENERIC_PINCONF) && defined(CONFIG_DEBUG_FS)
94 94
95void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev, 95void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
96 struct seq_file *s, unsigned pin); 96 struct seq_file *s, unsigned pin);
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c
index caecdd373061..c542a97c82f3 100644
--- a/drivers/pinctrl/pinctrl-abx500.c
+++ b/drivers/pinctrl/pinctrl-abx500.c
@@ -422,7 +422,7 @@ static u8 abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
422 } 422 }
423 423
424 /* check if pin use AlternateFunction register */ 424 /* check if pin use AlternateFunction register */
425 if ((af.alt_bit1 == UNUSED) && (af.alt_bit1 == UNUSED)) 425 if ((af.alt_bit1 == UNUSED) && (af.alt_bit2 == UNUSED))
426 return mode; 426 return mode;
427 /* 427 /*
428 * if pin GPIOSEL bit is set and pin supports alternate function, 428 * if pin GPIOSEL bit is set and pin supports alternate function,
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 1a00658b3ea0..bd83c8b01cd1 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -194,6 +194,11 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
194 } 194 }
195 195
196 if (!gpio_range) { 196 if (!gpio_range) {
197 /*
198 * A pin should not be freed more times than allocated.
199 */
200 if (WARN_ON(!desc->mux_usecount))
201 return NULL;
197 desc->mux_usecount--; 202 desc->mux_usecount--;
198 if (desc->mux_usecount) 203 if (desc->mux_usecount)
199 return NULL; 204 return NULL;
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index cc1f7bf53fd0..c6d77e20622c 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -4,7 +4,7 @@ menu "Remoteproc drivers"
4config REMOTEPROC 4config REMOTEPROC
5 tristate 5 tristate
6 depends on HAS_DMA 6 depends on HAS_DMA
7 select FW_CONFIG 7 select FW_LOADER
8 select VIRTIO 8 select VIRTIO
9 9
10config OMAP_REMOTEPROC 10config OMAP_REMOTEPROC
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 29387df4bfc9..8edb4aed5d36 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -217,7 +217,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
217 * TODO: support predefined notifyids (via resource table) 217 * TODO: support predefined notifyids (via resource table)
218 */ 218 */
219 ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL); 219 ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
220 if (ret) { 220 if (ret < 0) {
221 dev_err(dev, "idr_alloc failed: %d\n", ret); 221 dev_err(dev, "idr_alloc failed: %d\n", ret);
222 dma_free_coherent(dev->parent, size, va, dma); 222 dma_free_coherent(dev->parent, size, va, dma);
223 return ret; 223 return ret;
@@ -366,10 +366,12 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
366 /* it is now safe to add the virtio device */ 366 /* it is now safe to add the virtio device */
367 ret = rproc_add_virtio_dev(rvdev, rsc->id); 367 ret = rproc_add_virtio_dev(rvdev, rsc->id);
368 if (ret) 368 if (ret)
369 goto free_rvdev; 369 goto remove_rvdev;
370 370
371 return 0; 371 return 0;
372 372
373remove_rvdev:
374 list_del(&rvdev->node);
373free_rvdev: 375free_rvdev:
374 kfree(rvdev); 376 kfree(rvdev);
375 return ret; 377 return ret;
diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c
index a7743c069339..fb95c4220052 100644
--- a/drivers/remoteproc/ste_modem_rproc.c
+++ b/drivers/remoteproc/ste_modem_rproc.c
@@ -240,6 +240,8 @@ static int sproc_drv_remove(struct platform_device *pdev)
240 240
241 /* Unregister as remoteproc device */ 241 /* Unregister as remoteproc device */
242 rproc_del(sproc->rproc); 242 rproc_del(sproc->rproc);
243 dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE,
244 sproc->fw_addr, sproc->fw_dma_addr);
243 rproc_put(sproc->rproc); 245 rproc_put(sproc->rproc);
244 246
245 mdev->drv_data = NULL; 247 mdev->drv_data = NULL;
@@ -297,10 +299,13 @@ static int sproc_probe(struct platform_device *pdev)
297 /* Register as a remoteproc device */ 299 /* Register as a remoteproc device */
298 err = rproc_add(rproc); 300 err = rproc_add(rproc);
299 if (err) 301 if (err)
300 goto free_rproc; 302 goto free_mem;
301 303
302 return 0; 304 return 0;
303 305
306free_mem:
307 dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE,
308 sproc->fw_addr, sproc->fw_dma_addr);
304free_rproc: 309free_rproc:
305 /* Reset device data upon error */ 310 /* Reset device data upon error */
306 mdev->drv_data = NULL; 311 mdev->drv_data = NULL;
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 0a9f27e094ea..434ebc3a99dc 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -44,7 +44,6 @@ static DECLARE_COMPLETION(at91_rtc_updated);
44static unsigned int at91_alarm_year = AT91_RTC_EPOCH; 44static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
45static void __iomem *at91_rtc_regs; 45static void __iomem *at91_rtc_regs;
46static int irq; 46static int irq;
47static u32 at91_rtc_imr;
48 47
49/* 48/*
50 * Decode time/date into rtc_time structure 49 * Decode time/date into rtc_time structure
@@ -109,11 +108,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
109 cr = at91_rtc_read(AT91_RTC_CR); 108 cr = at91_rtc_read(AT91_RTC_CR);
110 at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); 109 at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
111 110
112 at91_rtc_imr |= AT91_RTC_ACKUPD;
113 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); 111 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
114 wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ 112 wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
115 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); 113 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
116 at91_rtc_imr &= ~AT91_RTC_ACKUPD;
117 114
118 at91_rtc_write(AT91_RTC_TIMR, 115 at91_rtc_write(AT91_RTC_TIMR,
119 bin2bcd(tm->tm_sec) << 0 116 bin2bcd(tm->tm_sec) << 0
@@ -145,7 +142,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
145 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); 142 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
146 tm->tm_year = at91_alarm_year - 1900; 143 tm->tm_year = at91_alarm_year - 1900;
147 144
148 alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM) 145 alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
149 ? 1 : 0; 146 ? 1 : 0;
150 147
151 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, 148 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -171,7 +168,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
171 tm.tm_sec = alrm->time.tm_sec; 168 tm.tm_sec = alrm->time.tm_sec;
172 169
173 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); 170 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
174 at91_rtc_imr &= ~AT91_RTC_ALARM;
175 at91_rtc_write(AT91_RTC_TIMALR, 171 at91_rtc_write(AT91_RTC_TIMALR,
176 bin2bcd(tm.tm_sec) << 0 172 bin2bcd(tm.tm_sec) << 0
177 | bin2bcd(tm.tm_min) << 8 173 | bin2bcd(tm.tm_min) << 8
@@ -184,7 +180,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
184 180
185 if (alrm->enabled) { 181 if (alrm->enabled) {
186 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); 182 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
187 at91_rtc_imr |= AT91_RTC_ALARM;
188 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); 183 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
189 } 184 }
190 185
@@ -201,12 +196,9 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
201 196
202 if (enabled) { 197 if (enabled) {
203 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); 198 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
204 at91_rtc_imr |= AT91_RTC_ALARM;
205 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); 199 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
206 } else { 200 } else
207 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); 201 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
208 at91_rtc_imr &= ~AT91_RTC_ALARM;
209 }
210 202
211 return 0; 203 return 0;
212} 204}
@@ -215,10 +207,12 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
215 */ 207 */
216static int at91_rtc_proc(struct device *dev, struct seq_file *seq) 208static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
217{ 209{
210 unsigned long imr = at91_rtc_read(AT91_RTC_IMR);
211
218 seq_printf(seq, "update_IRQ\t: %s\n", 212 seq_printf(seq, "update_IRQ\t: %s\n",
219 (at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no"); 213 (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
220 seq_printf(seq, "periodic_IRQ\t: %s\n", 214 seq_printf(seq, "periodic_IRQ\t: %s\n",
221 (at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no"); 215 (imr & AT91_RTC_SECEV) ? "yes" : "no");
222 216
223 return 0; 217 return 0;
224} 218}
@@ -233,7 +227,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
233 unsigned int rtsr; 227 unsigned int rtsr;
234 unsigned long events = 0; 228 unsigned long events = 0;
235 229
236 rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr; 230 rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);
237 if (rtsr) { /* this interrupt is shared! Is it ours? */ 231 if (rtsr) { /* this interrupt is shared! Is it ours? */
238 if (rtsr & AT91_RTC_ALARM) 232 if (rtsr & AT91_RTC_ALARM)
239 events |= (RTC_AF | RTC_IRQF); 233 events |= (RTC_AF | RTC_IRQF);
@@ -297,7 +291,6 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
297 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | 291 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
298 AT91_RTC_SECEV | AT91_RTC_TIMEV | 292 AT91_RTC_SECEV | AT91_RTC_TIMEV |
299 AT91_RTC_CALEV); 293 AT91_RTC_CALEV);
300 at91_rtc_imr = 0;
301 294
302 ret = request_irq(irq, at91_rtc_interrupt, 295 ret = request_irq(irq, at91_rtc_interrupt,
303 IRQF_SHARED, 296 IRQF_SHARED,
@@ -336,7 +329,6 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
336 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | 329 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
337 AT91_RTC_SECEV | AT91_RTC_TIMEV | 330 AT91_RTC_SECEV | AT91_RTC_TIMEV |
338 AT91_RTC_CALEV); 331 AT91_RTC_CALEV);
339 at91_rtc_imr = 0;
340 free_irq(irq, pdev); 332 free_irq(irq, pdev);
341 333
342 rtc_device_unregister(rtc); 334 rtc_device_unregister(rtc);
@@ -349,35 +341,31 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
349 341
350/* AT91RM9200 RTC Power management control */ 342/* AT91RM9200 RTC Power management control */
351 343
352static u32 at91_rtc_bkpimr; 344static u32 at91_rtc_imr;
353
354 345
355static int at91_rtc_suspend(struct device *dev) 346static int at91_rtc_suspend(struct device *dev)
356{ 347{
357 /* this IRQ is shared with DBGU and other hardware which isn't 348 /* this IRQ is shared with DBGU and other hardware which isn't
358 * necessarily doing PM like we are... 349 * necessarily doing PM like we are...
359 */ 350 */
360 at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV); 351 at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR)
361 if (at91_rtc_bkpimr) { 352 & (AT91_RTC_ALARM|AT91_RTC_SECEV);
362 if (device_may_wakeup(dev)) { 353 if (at91_rtc_imr) {
354 if (device_may_wakeup(dev))
363 enable_irq_wake(irq); 355 enable_irq_wake(irq);
364 } else { 356 else
365 at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr); 357 at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr);
366 at91_rtc_imr &= ~at91_rtc_bkpimr; 358 }
367 }
368}
369 return 0; 359 return 0;
370} 360}
371 361
372static int at91_rtc_resume(struct device *dev) 362static int at91_rtc_resume(struct device *dev)
373{ 363{
374 if (at91_rtc_bkpimr) { 364 if (at91_rtc_imr) {
375 if (device_may_wakeup(dev)) { 365 if (device_may_wakeup(dev))
376 disable_irq_wake(irq); 366 disable_irq_wake(irq);
377 } else { 367 else
378 at91_rtc_imr |= at91_rtc_bkpimr; 368 at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);
379 at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr);
380 }
381 } 369 }
382 return 0; 370 return 0;
383} 371}
diff --git a/drivers/rtc/rtc-at91rm9200.h b/drivers/rtc/rtc-at91rm9200.h
index 5f940b6844cb..da1945e5f714 100644
--- a/drivers/rtc/rtc-at91rm9200.h
+++ b/drivers/rtc/rtc-at91rm9200.h
@@ -64,6 +64,7 @@
64#define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */ 64#define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */
65#define AT91_RTC_IER 0x20 /* Interrupt Enable Register */ 65#define AT91_RTC_IER 0x20 /* Interrupt Enable Register */
66#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */ 66#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
67#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
67 68
68#define AT91_RTC_VER 0x2c /* Valid Entry Register */ 69#define AT91_RTC_VER 0x2c /* Valid Entry Register */
69#define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */ 70#define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 5ac9c935c151..e9b9c8392832 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -307,7 +307,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq)
307 case EQC_WR_PROHIBIT: 307 case EQC_WR_PROHIBIT:
308 spin_lock_irqsave(&bdev->lock, flags); 308 spin_lock_irqsave(&bdev->lock, flags);
309 if (bdev->state != SCM_WR_PROHIBIT) 309 if (bdev->state != SCM_WR_PROHIBIT)
310 pr_info("%lu: Write access to the SCM increment is suspended\n", 310 pr_info("%lx: Write access to the SCM increment is suspended\n",
311 (unsigned long) bdev->scmdev->address); 311 (unsigned long) bdev->scmdev->address);
312 bdev->state = SCM_WR_PROHIBIT; 312 bdev->state = SCM_WR_PROHIBIT;
313 spin_unlock_irqrestore(&bdev->lock, flags); 313 spin_unlock_irqrestore(&bdev->lock, flags);
@@ -445,7 +445,7 @@ void scm_blk_set_available(struct scm_blk_dev *bdev)
445 445
446 spin_lock_irqsave(&bdev->lock, flags); 446 spin_lock_irqsave(&bdev->lock, flags);
447 if (bdev->state == SCM_WR_PROHIBIT) 447 if (bdev->state == SCM_WR_PROHIBIT)
448 pr_info("%lu: Write access to the SCM increment is restored\n", 448 pr_info("%lx: Write access to the SCM increment is restored\n",
449 (unsigned long) bdev->scmdev->address); 449 (unsigned long) bdev->scmdev->address);
450 bdev->state = SCM_OPER; 450 bdev->state = SCM_OPER;
451 spin_unlock_irqrestore(&bdev->lock, flags); 451 spin_unlock_irqrestore(&bdev->lock, flags);
@@ -463,12 +463,15 @@ static int __init scm_blk_init(void)
463 goto out; 463 goto out;
464 464
465 scm_major = ret; 465 scm_major = ret;
466 if (scm_alloc_rqs(nr_requests)) 466 ret = scm_alloc_rqs(nr_requests);
467 if (ret)
467 goto out_unreg; 468 goto out_unreg;
468 469
469 scm_debug = debug_register("scm_log", 16, 1, 16); 470 scm_debug = debug_register("scm_log", 16, 1, 16);
470 if (!scm_debug) 471 if (!scm_debug) {
472 ret = -ENOMEM;
471 goto out_free; 473 goto out_free;
474 }
472 475
473 debug_register_view(scm_debug, &debug_hex_ascii_view); 476 debug_register_view(scm_debug, &debug_hex_ascii_view);
474 debug_set_level(scm_debug, 2); 477 debug_set_level(scm_debug, 2);
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c
index 5f6180d6ff08..c98cf52d78d1 100644
--- a/drivers/s390/block/scm_drv.c
+++ b/drivers/s390/block/scm_drv.c
@@ -19,7 +19,7 @@ static void scm_notify(struct scm_device *scmdev, enum scm_event event)
19 19
20 switch (event) { 20 switch (event) {
21 case SCM_CHANGE: 21 case SCM_CHANGE:
22 pr_info("%lu: The capabilities of the SCM increment changed\n", 22 pr_info("%lx: The capabilities of the SCM increment changed\n",
23 (unsigned long) scmdev->address); 23 (unsigned long) scmdev->address);
24 SCM_LOG(2, "State changed"); 24 SCM_LOG(2, "State changed");
25 SCM_LOG_STATE(2, scmdev); 25 SCM_LOG_STATE(2, scmdev);
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index b907dba24025..cee69dac3e18 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -915,7 +915,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
915 int i, rc; 915 int i, rc;
916 916
917 /* Check if the tty3270 is already there. */ 917 /* Check if the tty3270 is already there. */
918 view = raw3270_find_view(&tty3270_fn, tty->index); 918 view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR);
919 if (!IS_ERR(view)) { 919 if (!IS_ERR(view)) {
920 tp = container_of(view, struct tty3270, view); 920 tp = container_of(view, struct tty3270, view);
921 tty->driver_data = tp; 921 tty->driver_data = tp;
@@ -927,15 +927,16 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
927 tp->inattr = TF_INPUT; 927 tp->inattr = TF_INPUT;
928 return tty_port_install(&tp->port, driver, tty); 928 return tty_port_install(&tp->port, driver, tty);
929 } 929 }
930 if (tty3270_max_index < tty->index) 930 if (tty3270_max_index < tty->index + 1)
931 tty3270_max_index = tty->index; 931 tty3270_max_index = tty->index + 1;
932 932
933 /* Allocate tty3270 structure on first open. */ 933 /* Allocate tty3270 structure on first open. */
934 tp = tty3270_alloc_view(); 934 tp = tty3270_alloc_view();
935 if (IS_ERR(tp)) 935 if (IS_ERR(tp))
936 return PTR_ERR(tp); 936 return PTR_ERR(tp);
937 937
938 rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index); 938 rc = raw3270_add_view(&tp->view, &tty3270_fn,
939 tty->index + RAW3270_FIRSTMINOR);
939 if (rc) { 940 if (rc) {
940 tty3270_free_view(tp); 941 tty3270_free_view(tp);
941 return rc; 942 return rc;
@@ -1846,12 +1847,12 @@ static const struct tty_operations tty3270_ops = {
1846 1847
1847void tty3270_create_cb(int minor) 1848void tty3270_create_cb(int minor)
1848{ 1849{
1849 tty_register_device(tty3270_driver, minor, NULL); 1850 tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
1850} 1851}
1851 1852
1852void tty3270_destroy_cb(int minor) 1853void tty3270_destroy_cb(int minor)
1853{ 1854{
1854 tty_unregister_device(tty3270_driver, minor); 1855 tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
1855} 1856}
1856 1857
1857struct raw3270_notifier tty3270_notifier = 1858struct raw3270_notifier tty3270_notifier =
@@ -1884,7 +1885,8 @@ static int __init tty3270_init(void)
1884 driver->driver_name = "tty3270"; 1885 driver->driver_name = "tty3270";
1885 driver->name = "3270/tty"; 1886 driver->name = "3270/tty";
1886 driver->major = IBM_TTY3270_MAJOR; 1887 driver->major = IBM_TTY3270_MAJOR;
1887 driver->minor_start = 0; 1888 driver->minor_start = RAW3270_FIRSTMINOR;
1889 driver->name_base = RAW3270_FIRSTMINOR;
1888 driver->type = TTY_DRIVER_TYPE_SYSTEM; 1890 driver->type = TTY_DRIVER_TYPE_SYSTEM;
1889 driver->subtype = SYSTEM_TYPE_TTY; 1891 driver->subtype = SYSTEM_TYPE_TTY;
1890 driver->init_termios = tty_std_termios; 1892 driver->init_termios = tty_std_termios;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 8c0622399fcd..6ccb7457746b 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -769,6 +769,7 @@ struct qeth_card {
769 unsigned long thread_start_mask; 769 unsigned long thread_start_mask;
770 unsigned long thread_allowed_mask; 770 unsigned long thread_allowed_mask;
771 unsigned long thread_running_mask; 771 unsigned long thread_running_mask;
772 struct task_struct *recovery_task;
772 spinlock_t ip_lock; 773 spinlock_t ip_lock;
773 struct list_head ip_list; 774 struct list_head ip_list;
774 struct list_head *ip_tbd_list; 775 struct list_head *ip_tbd_list;
@@ -862,6 +863,8 @@ extern struct qeth_card_list_struct qeth_core_card_list;
862extern struct kmem_cache *qeth_core_header_cache; 863extern struct kmem_cache *qeth_core_header_cache;
863extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; 864extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
864 865
866void qeth_set_recovery_task(struct qeth_card *);
867void qeth_clear_recovery_task(struct qeth_card *);
865void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); 868void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
866int qeth_threads_running(struct qeth_card *, unsigned long); 869int qeth_threads_running(struct qeth_card *, unsigned long);
867int qeth_wait_for_threads(struct qeth_card *, unsigned long); 870int qeth_wait_for_threads(struct qeth_card *, unsigned long);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 0d73a999983d..451f92020599 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -177,6 +177,23 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
177 return "n/a"; 177 return "n/a";
178} 178}
179 179
180void qeth_set_recovery_task(struct qeth_card *card)
181{
182 card->recovery_task = current;
183}
184EXPORT_SYMBOL_GPL(qeth_set_recovery_task);
185
186void qeth_clear_recovery_task(struct qeth_card *card)
187{
188 card->recovery_task = NULL;
189}
190EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);
191
192static bool qeth_is_recovery_task(const struct qeth_card *card)
193{
194 return card->recovery_task == current;
195}
196
180void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, 197void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
181 int clear_start_mask) 198 int clear_start_mask)
182{ 199{
@@ -205,6 +222,8 @@ EXPORT_SYMBOL_GPL(qeth_threads_running);
205 222
206int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) 223int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
207{ 224{
225 if (qeth_is_recovery_task(card))
226 return 0;
208 return wait_event_interruptible(card->wait_q, 227 return wait_event_interruptible(card->wait_q,
209 qeth_threads_running(card, threads) == 0); 228 qeth_threads_running(card, threads) == 0);
210} 229}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index d690166efeaf..155b101bd730 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1143,6 +1143,7 @@ static int qeth_l2_recover(void *ptr)
1143 QETH_CARD_TEXT(card, 2, "recover2"); 1143 QETH_CARD_TEXT(card, 2, "recover2");
1144 dev_warn(&card->gdev->dev, 1144 dev_warn(&card->gdev->dev,
1145 "A recovery process has been started for the device\n"); 1145 "A recovery process has been started for the device\n");
1146 qeth_set_recovery_task(card);
1146 __qeth_l2_set_offline(card->gdev, 1); 1147 __qeth_l2_set_offline(card->gdev, 1);
1147 rc = __qeth_l2_set_online(card->gdev, 1); 1148 rc = __qeth_l2_set_online(card->gdev, 1);
1148 if (!rc) 1149 if (!rc)
@@ -1153,6 +1154,7 @@ static int qeth_l2_recover(void *ptr)
1153 dev_warn(&card->gdev->dev, "The qeth device driver " 1154 dev_warn(&card->gdev->dev, "The qeth device driver "
1154 "failed to recover an error on the device\n"); 1155 "failed to recover an error on the device\n");
1155 } 1156 }
1157 qeth_clear_recovery_task(card);
1156 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 1158 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1157 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 1159 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
1158 return 0; 1160 return 0;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8710337dab3e..1f7edf1b26c3 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3515,6 +3515,7 @@ static int qeth_l3_recover(void *ptr)
3515 QETH_CARD_TEXT(card, 2, "recover2"); 3515 QETH_CARD_TEXT(card, 2, "recover2");
3516 dev_warn(&card->gdev->dev, 3516 dev_warn(&card->gdev->dev,
3517 "A recovery process has been started for the device\n"); 3517 "A recovery process has been started for the device\n");
3518 qeth_set_recovery_task(card);
3518 __qeth_l3_set_offline(card->gdev, 1); 3519 __qeth_l3_set_offline(card->gdev, 1);
3519 rc = __qeth_l3_set_online(card->gdev, 1); 3520 rc = __qeth_l3_set_online(card->gdev, 1);
3520 if (!rc) 3521 if (!rc)
@@ -3525,6 +3526,7 @@ static int qeth_l3_recover(void *ptr)
3525 dev_warn(&card->gdev->dev, "The qeth device driver " 3526 dev_warn(&card->gdev->dev, "The qeth device driver "
3526 "failed to recover an error on the device\n"); 3527 "failed to recover an error on the device\n");
3527 } 3528 }
3529 qeth_clear_recovery_task(card);
3528 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 3530 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
3529 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 3531 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
3530 return 0; 3532 return 0;
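
Taken together, the qeth hunks record which task is performing recovery so that qeth_wait_for_threads() can return immediately when called from that task, which avoids the recovery thread blocking on a thread mask it is itself responsible for clearing. A hedged userspace analogue using pthreads (names invented for illustration):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct card {
        pthread_t recovery_task;     /* thread currently running recovery, if any */
        bool      recovery_active;
    };

    static void set_recovery_task(struct card *c)
    {
        c->recovery_task = pthread_self();
        c->recovery_active = true;
    }

    static void clear_recovery_task(struct card *c) { c->recovery_active = false; }

    static bool is_recovery_task(const struct card *c)
    {
        return c->recovery_active && pthread_equal(c->recovery_task, pthread_self());
    }

    /* Mirrors the shape of qeth_wait_for_threads(): recovery never waits on itself. */
    static int wait_for_threads(struct card *c)
    {
        if (is_recovery_task(c))
            return 0;            /* would otherwise block on its own thread bit */
        /* ... the real code sleeps here until the running mask clears ... */
        return 0;
    }

    int main(void)
    {
        struct card c = { 0 };
        set_recovery_task(&c);
        printf("wait from recovery thread: %d\n", wait_for_threads(&c));
        clear_recovery_task(&c);
        return 0;
    }
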
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 2daf4b0da434..90bc7bd00966 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -940,6 +940,7 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
940 fc_exch_init(lport); 940 fc_exch_init(lport);
941 fc_rport_init(lport); 941 fc_rport_init(lport);
942 fc_disc_init(lport); 942 fc_disc_init(lport);
943 fc_disc_config(lport, lport);
943 return 0; 944 return 0;
944} 945}
945 946
@@ -2133,6 +2134,7 @@ static int _bnx2fc_create(struct net_device *netdev,
2133 } 2134 }
2134 2135
2135 ctlr = bnx2fc_to_ctlr(interface); 2136 ctlr = bnx2fc_to_ctlr(interface);
2137 cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
2136 interface->vlan_id = vlan_id; 2138 interface->vlan_id = vlan_id;
2137 2139
2138 interface->timer_work_queue = 2140 interface->timer_work_queue =
@@ -2143,7 +2145,7 @@ static int _bnx2fc_create(struct net_device *netdev,
2143 goto ifput_err; 2145 goto ifput_err;
2144 } 2146 }
2145 2147
2146 lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0); 2148 lport = bnx2fc_if_create(interface, &cdev->dev, 0);
2147 if (!lport) { 2149 if (!lport) {
2148 printk(KERN_ERR PFX "Failed to create interface (%s)\n", 2150 printk(KERN_ERR PFX "Failed to create interface (%s)\n",
2149 netdev->name); 2151 netdev->name);
@@ -2159,8 +2161,6 @@ static int _bnx2fc_create(struct net_device *netdev,
2159 /* Make this master N_port */ 2161 /* Make this master N_port */
2160 ctlr->lp = lport; 2162 ctlr->lp = lport;
2161 2163
2162 cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
2163
2164 if (link_state == BNX2FC_CREATE_LINK_UP) 2164 if (link_state == BNX2FC_CREATE_LINK_UP)
2165 cdev->enabled = FCOE_CTLR_ENABLED; 2165 cdev->enabled = FCOE_CTLR_ENABLED;
2166 else 2166 else
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index b5d92fc93c70..9bfdc9a3f897 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -490,7 +490,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
490{ 490{
491 struct net_device *netdev = fcoe->netdev; 491 struct net_device *netdev = fcoe->netdev;
492 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); 492 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
493 struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
494 493
495 rtnl_lock(); 494 rtnl_lock();
496 if (!fcoe->removed) 495 if (!fcoe->removed)
@@ -501,7 +500,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
501 /* tear-down the FCoE controller */ 500 /* tear-down the FCoE controller */
502 fcoe_ctlr_destroy(fip); 501 fcoe_ctlr_destroy(fip);
503 scsi_host_put(fip->lp->host); 502 scsi_host_put(fip->lp->host);
504 fcoe_ctlr_device_delete(ctlr_dev);
505 dev_put(netdev); 503 dev_put(netdev);
506 module_put(THIS_MODULE); 504 module_put(THIS_MODULE);
507} 505}
@@ -2194,6 +2192,8 @@ out_nodev:
2194 */ 2192 */
2195static void fcoe_destroy_work(struct work_struct *work) 2193static void fcoe_destroy_work(struct work_struct *work)
2196{ 2194{
2195 struct fcoe_ctlr_device *cdev;
2196 struct fcoe_ctlr *ctlr;
2197 struct fcoe_port *port; 2197 struct fcoe_port *port;
2198 struct fcoe_interface *fcoe; 2198 struct fcoe_interface *fcoe;
2199 struct Scsi_Host *shost; 2199 struct Scsi_Host *shost;
@@ -2224,10 +2224,15 @@ static void fcoe_destroy_work(struct work_struct *work)
2224 mutex_lock(&fcoe_config_mutex); 2224 mutex_lock(&fcoe_config_mutex);
2225 2225
2226 fcoe = port->priv; 2226 fcoe = port->priv;
2227 ctlr = fcoe_to_ctlr(fcoe);
2228 cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
2229
2227 fcoe_if_destroy(port->lport); 2230 fcoe_if_destroy(port->lport);
2228 fcoe_interface_cleanup(fcoe); 2231 fcoe_interface_cleanup(fcoe);
2229 2232
2230 mutex_unlock(&fcoe_config_mutex); 2233 mutex_unlock(&fcoe_config_mutex);
2234
2235 fcoe_ctlr_device_delete(cdev);
2231} 2236}
2232 2237
2233/** 2238/**
@@ -2335,7 +2340,9 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
2335 rc = -EIO; 2340 rc = -EIO;
2336 rtnl_unlock(); 2341 rtnl_unlock();
2337 fcoe_interface_cleanup(fcoe); 2342 fcoe_interface_cleanup(fcoe);
2338 goto out_nortnl; 2343 mutex_unlock(&fcoe_config_mutex);
2344 fcoe_ctlr_device_delete(ctlr_dev);
2345 goto out;
2339 } 2346 }
2340 2347
2341 /* Make this the "master" N_Port */ 2348 /* Make this the "master" N_Port */
@@ -2375,8 +2382,8 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
2375 2382
2376out_nodev: 2383out_nodev:
2377 rtnl_unlock(); 2384 rtnl_unlock();
2378out_nortnl:
2379 mutex_unlock(&fcoe_config_mutex); 2385 mutex_unlock(&fcoe_config_mutex);
2386out:
2380 return rc; 2387 return rc;
2381} 2388}
2382 2389
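
In the bnx2fc and fcoe hunks above, the fcoe_ctlr_device is looked up early and its deletion is moved out of fcoe_interface_cleanup(), so fcoe_ctlr_device_delete() now runs only after fcoe_config_mutex has been dropped, presumably because the deletion can synchronize with work that itself needs that mutex. A small C sketch of the general pattern (capture under the lock, delete outside it), with invented names:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t config_mutex = PTHREAD_MUTEX_INITIALIZER;

    struct ctlr_dev { int dummy; };
    struct port { struct ctlr_dev *cdev; };

    /* Teardown hook that, in the real driver, may synchronize with work
     * which itself takes config_mutex, so it must not run under the lock. */
    static void ctlr_device_delete(struct ctlr_dev *cdev)
    {
        free(cdev);
    }

    static void destroy_work(struct port *p)
    {
        struct ctlr_dev *cdev;

        pthread_mutex_lock(&config_mutex);
        cdev = p->cdev;           /* capture while the port is still valid */
        p->cdev = NULL;           /* ... interface/port teardown happens here ... */
        pthread_mutex_unlock(&config_mutex);

        ctlr_device_delete(cdev); /* safe: the mutex is no longer held */
    }

    int main(void)
    {
        struct port p = { .cdev = malloc(sizeof(struct ctlr_dev)) };
        destroy_work(&p);
        return 0;
    }
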
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 08c3bc398da2..a76247201be5 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2815,6 +2815,47 @@ unlock:
2815} 2815}
2816 2816
2817/** 2817/**
2818 * fcoe_ctlr_mode_set() - Set or reset the ctlr's mode
2819 * @lport: The local port to be (re)configured
2820 * @fip: The FCoE controller whose mode is changing
2821 * @fip_mode: The new fip mode
2822 *
 2823 * Note that we shouldn't be changing the libfc discovery settings
2824 * (fc_disc_config) while an lport is going through the libfc state
2825 * machine. The mode can only be changed when a fcoe_ctlr device is
2826 * disabled, so that should ensure that this routine is only called
2827 * when nothing is happening.
2828 */
2829void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip,
2830 enum fip_state fip_mode)
2831{
2832 void *priv;
2833
2834 WARN_ON(lport->state != LPORT_ST_RESET &&
2835 lport->state != LPORT_ST_DISABLED);
2836
2837 if (fip_mode == FIP_MODE_VN2VN) {
2838 lport->rport_priv_size = sizeof(struct fcoe_rport);
2839 lport->point_to_multipoint = 1;
2840 lport->tt.disc_recv_req = fcoe_ctlr_disc_recv;
2841 lport->tt.disc_start = fcoe_ctlr_disc_start;
2842 lport->tt.disc_stop = fcoe_ctlr_disc_stop;
2843 lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final;
2844 priv = fip;
2845 } else {
2846 lport->rport_priv_size = 0;
2847 lport->point_to_multipoint = 0;
2848 lport->tt.disc_recv_req = NULL;
2849 lport->tt.disc_start = NULL;
2850 lport->tt.disc_stop = NULL;
2851 lport->tt.disc_stop_final = NULL;
2852 priv = lport;
2853 }
2854
2855 fc_disc_config(lport, priv);
2856}
2857
2858/**
2818 * fcoe_libfc_config() - Sets up libfc related properties for local port 2859 * fcoe_libfc_config() - Sets up libfc related properties for local port
2819 * @lport: The local port to configure libfc for 2860 * @lport: The local port to configure libfc for
2820 * @fip: The FCoE controller in use by the local port 2861 * @fip: The FCoE controller in use by the local port
@@ -2833,21 +2874,9 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
2833 fc_exch_init(lport); 2874 fc_exch_init(lport);
2834 fc_elsct_init(lport); 2875 fc_elsct_init(lport);
2835 fc_lport_init(lport); 2876 fc_lport_init(lport);
2836 if (fip->mode == FIP_MODE_VN2VN)
2837 lport->rport_priv_size = sizeof(struct fcoe_rport);
2838 fc_rport_init(lport); 2877 fc_rport_init(lport);
2839 if (fip->mode == FIP_MODE_VN2VN) { 2878 fc_disc_init(lport);
2840 lport->point_to_multipoint = 1; 2879 fcoe_ctlr_mode_set(lport, fip, fip->mode);
2841 lport->tt.disc_recv_req = fcoe_ctlr_disc_recv;
2842 lport->tt.disc_start = fcoe_ctlr_disc_start;
2843 lport->tt.disc_stop = fcoe_ctlr_disc_stop;
2844 lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final;
2845 mutex_init(&lport->disc.disc_mutex);
2846 INIT_LIST_HEAD(&lport->disc.rports);
2847 lport->disc.priv = fip;
2848 } else {
2849 fc_disc_init(lport);
2850 }
2851 return 0; 2880 return 0;
2852} 2881}
2853EXPORT_SYMBOL_GPL(fcoe_libfc_config); 2882EXPORT_SYMBOL_GPL(fcoe_libfc_config);
@@ -2875,6 +2904,7 @@ EXPORT_SYMBOL(fcoe_fcf_get_selected);
2875void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) 2904void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
2876{ 2905{
2877 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); 2906 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
2907 struct fc_lport *lport = ctlr->lp;
2878 2908
2879 mutex_lock(&ctlr->ctlr_mutex); 2909 mutex_lock(&ctlr->ctlr_mutex);
2880 switch (ctlr_dev->mode) { 2910 switch (ctlr_dev->mode) {
@@ -2888,5 +2918,7 @@ void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
2888 } 2918 }
2889 2919
2890 mutex_unlock(&ctlr->ctlr_mutex); 2920 mutex_unlock(&ctlr->ctlr_mutex);
2921
2922 fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode);
2891} 2923}
2892EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode); 2924EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode);
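
The new fcoe_ctlr_mode_set() above is essentially an ops switch: in VN2VN mode the lport's discovery callbacks point at the FIP controller's handlers and the discovery priv is the fip, otherwise the callbacks are cleared back to the libfc defaults and priv is the lport itself. A self-contained C sketch of that pattern, with invented types and handlers:

    #include <stddef.h>
    #include <stdio.h>

    enum fip_mode { FIP_MODE_FABRIC, FIP_MODE_VN2VN };

    struct lport;
    struct disc_ops {
        void (*disc_start)(struct lport *);
        void (*disc_stop)(struct lport *);
    };

    struct lport {
        struct disc_ops ops;
        void *disc_priv;
        int point_to_multipoint;
    };

    static void vn2vn_disc_start(struct lport *lp) { (void)lp; puts("VN2VN discovery"); }
    static void vn2vn_disc_stop(struct lport *lp)  { (void)lp; }

    /* Pick discovery ops and priv per mode, as fcoe_ctlr_mode_set() does. */
    static void ctlr_mode_set(struct lport *lp, void *fip, enum fip_mode mode)
    {
        if (mode == FIP_MODE_VN2VN) {
            lp->point_to_multipoint = 1;
            lp->ops.disc_start = vn2vn_disc_start;
            lp->ops.disc_stop  = vn2vn_disc_stop;
            lp->disc_priv = fip;       /* discovery context is the FIP controller */
        } else {
            lp->point_to_multipoint = 0;
            lp->ops.disc_start = NULL; /* fall back to default fabric discovery */
            lp->ops.disc_stop  = NULL;
            lp->disc_priv = lp;        /* discovery context is the lport itself */
        }
    }

    int main(void)
    {
        struct lport lp = { 0 };
        int fip_token;
        ctlr_mode_set(&lp, &fip_token, FIP_MODE_VN2VN);
        if (lp.ops.disc_start)
            lp.ops.disc_start(&lp);
        return 0;
    }
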
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index a044f593e8b9..d0fa4b6c551f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1899,8 +1899,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1899 sdev->allow_restart = 1; 1899 sdev->allow_restart = 1;
1900 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); 1900 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
1901 } 1901 }
1902 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1903 spin_unlock_irqrestore(shost->host_lock, lock_flags); 1902 spin_unlock_irqrestore(shost->host_lock, lock_flags);
1903 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1904 return 0; 1904 return 0;
1905} 1905}
1906 1906
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index f328089a1060..2197b57fb225 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5148,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5148 ipr_trace; 5148 ipr_trace;
5149 } 5149 }
5150 5150
5151 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); 5151 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5152 if (!ipr_is_naca_model(res)) 5152 if (!ipr_is_naca_model(res))
5153 res->needs_sync_complete = 1; 5153 res->needs_sync_complete = 1;
5154 5154
@@ -9349,7 +9349,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9349 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 9349 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9350 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 9350 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9351 9351
9352 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); 9352 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9353 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9354 else
9355 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9353 if (rc) { 9356 if (rc) {
9354 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); 9357 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9355 return rc; 9358 return rc;
@@ -9371,7 +9374,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9371 9374
9372 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 9375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9373 9376
9374 free_irq(pdev->irq, ioa_cfg); 9377 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9378 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9379 else
9380 free_irq(pdev->irq, ioa_cfg);
9375 9381
9376 LEAVE; 9382 LEAVE;
9377 9383
@@ -9722,6 +9728,7 @@ static void __ipr_remove(struct pci_dev *pdev)
9722 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 9728 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9723 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 9729 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9724 flush_work(&ioa_cfg->work_q); 9730 flush_work(&ioa_cfg->work_q);
9731 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9725 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 9732 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9726 9733
9727 spin_lock(&ipr_driver_lock); 9734 spin_lock(&ipr_driver_lock);
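
The ipr_test_msi() change simply attaches and detaches the test handler on whichever interrupt line is actually in use: the first MSI-X vector when MSI-X is enabled, otherwise the legacy pdev->irq. A tiny hedged sketch of that selection:

    #include <stdbool.h>
    #include <stdio.h>

    struct ioa_cfg {
        bool use_msix;
        unsigned int msix_vec0;   /* first MSI-X vector */
        unsigned int legacy_irq;  /* pdev->irq */
    };

    /* Pick the irq line the test handler must be attached to and released from. */
    static unsigned int test_irq_line(const struct ioa_cfg *cfg)
    {
        return cfg->use_msix ? cfg->msix_vec0 : cfg->legacy_irq;
    }

    int main(void)
    {
        struct ioa_cfg cfg = { .use_msix = true, .msix_vec0 = 42, .legacy_irq = 17 };
        printf("request/free irq %u\n", test_irq_line(&cfg));
        return 0;
    }
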
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 8e561e6a557c..880a9068ca12 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -712,12 +712,13 @@ static void fc_disc_stop_final(struct fc_lport *lport)
712} 712}
713 713
714/** 714/**
715 * fc_disc_init() - Initialize the discovery layer for a local port 715 * fc_disc_config() - Configure the discovery layer for a local port
716 * @lport: The local port that needs the discovery layer to be initialized 716 * @lport: The local port that needs the discovery layer to be configured
 717 * @priv: Private data structure for users of the discovery layer
717 */ 718 */
718int fc_disc_init(struct fc_lport *lport) 719void fc_disc_config(struct fc_lport *lport, void *priv)
719{ 720{
720 struct fc_disc *disc; 721 struct fc_disc *disc = &lport->disc;
721 722
722 if (!lport->tt.disc_start) 723 if (!lport->tt.disc_start)
723 lport->tt.disc_start = fc_disc_start; 724 lport->tt.disc_start = fc_disc_start;
@@ -732,12 +733,21 @@ int fc_disc_init(struct fc_lport *lport)
732 lport->tt.disc_recv_req = fc_disc_recv_req; 733 lport->tt.disc_recv_req = fc_disc_recv_req;
733 734
734 disc = &lport->disc; 735 disc = &lport->disc;
736
737 disc->priv = priv;
738}
739EXPORT_SYMBOL(fc_disc_config);
740
741/**
742 * fc_disc_init() - Initialize the discovery layer for a local port
743 * @lport: The local port that needs the discovery layer to be initialized
744 */
745void fc_disc_init(struct fc_lport *lport)
746{
747 struct fc_disc *disc = &lport->disc;
748
735 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); 749 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
736 mutex_init(&disc->disc_mutex); 750 mutex_init(&disc->disc_mutex);
737 INIT_LIST_HEAD(&disc->rports); 751 INIT_LIST_HEAD(&disc->rports);
738
739 disc->priv = lport;
740
741 return 0;
742} 752}
743EXPORT_SYMBOL(fc_disc_init); 753EXPORT_SYMBOL(fc_disc_init);
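
The fc_disc.c hunk splits one-time construction (delayed work, mutex, rport list) out into fc_disc_init(), while the re-runnable part, installing default ops and the discovery priv pointer, becomes fc_disc_config() so it can be called again from fcoe_ctlr_mode_set() on a mode change. A hedged C sketch of that init-once / configure-many split:

    #include <stdio.h>

    struct disc {
        int   constructed;   /* stands in for work/mutex/list setup */
        void *priv;          /* user of the discovery layer */
    };

    /* One-time setup: called exactly once per lport. */
    static void disc_init(struct disc *d)
    {
        d->constructed = 1;  /* INIT_DELAYED_WORK / mutex_init / INIT_LIST_HEAD */
    }

    /* Reconfiguration: may run again whenever the FIP mode changes. */
    static void disc_config(struct disc *d, void *priv)
    {
        d->priv = priv;      /* plus filling in any missing default ops */
    }

    int main(void)
    {
        struct disc d = { 0 };
        int lport, fip;

        disc_init(&d);
        disc_config(&d, &lport);  /* fabric mode: priv is the lport */
        disc_config(&d, &fip);    /* later switch to VN2VN: priv is the fip */
        printf("constructed=%d\n", d.constructed);
        return 0;
    }
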
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index aec2e0da5016..55cbd0180159 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -235,6 +235,17 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
235 linkrate = phy->linkrate; 235 linkrate = phy->linkrate;
236 memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); 236 memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
237 237
238 /* Handle vacant phy - rest of dr data is not valid so skip it */
239 if (phy->phy_state == PHY_VACANT) {
240 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
241 phy->attached_dev_type = NO_DEVICE;
242 if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
243 phy->phy_id = phy_id;
244 goto skip;
245 } else
246 goto out;
247 }
248
238 phy->attached_dev_type = to_dev_type(dr); 249 phy->attached_dev_type = to_dev_type(dr);
239 if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) 250 if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
240 goto out; 251 goto out;
@@ -272,6 +283,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
272 phy->phy->maximum_linkrate = dr->pmax_linkrate; 283 phy->phy->maximum_linkrate = dr->pmax_linkrate;
273 phy->phy->negotiated_linkrate = phy->linkrate; 284 phy->phy->negotiated_linkrate = phy->linkrate;
274 285
286 skip:
275 if (new_phy) 287 if (new_phy)
276 if (sas_phy_add(phy->phy)) { 288 if (sas_phy_add(phy->phy)) {
277 sas_phy_free(phy->phy); 289 sas_phy_free(phy->phy);
@@ -388,7 +400,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single)
388 if (!disc_req) 400 if (!disc_req)
389 return -ENOMEM; 401 return -ENOMEM;
390 402
391 disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE); 403 disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
392 if (!disc_resp) { 404 if (!disc_resp) {
393 kfree(disc_req); 405 kfree(disc_req);
394 return -ENOMEM; 406 return -ENOMEM;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 74b67d98e952..d43faf34c1e2 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -438,11 +438,12 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
438 struct lpfc_rqe *temp_hrqe; 438 struct lpfc_rqe *temp_hrqe;
439 struct lpfc_rqe *temp_drqe; 439 struct lpfc_rqe *temp_drqe;
440 struct lpfc_register doorbell; 440 struct lpfc_register doorbell;
441 int put_index = hq->host_index; 441 int put_index;
442 442
443 /* sanity check on queue memory */ 443 /* sanity check on queue memory */
444 if (unlikely(!hq) || unlikely(!dq)) 444 if (unlikely(!hq) || unlikely(!dq))
445 return -ENOMEM; 445 return -ENOMEM;
446 put_index = hq->host_index;
446 temp_hrqe = hq->qe[hq->host_index].rqe; 447 temp_hrqe = hq->qe[hq->host_index].rqe;
447 temp_drqe = dq->qe[dq->host_index].rqe; 448 temp_drqe = dq->qe[dq->host_index].rqe;
448 449
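
The lpfc change is a dereference-before-check fix: hq->host_index was previously read in the initializer, before the unlikely(!hq) sanity test could reject a NULL queue. A minimal C illustration of the corrected ordering:

    #include <errno.h>
    #include <stdio.h>

    struct queue { unsigned int host_index; };

    static int rq_put(struct queue *hq, struct queue *dq)
    {
        unsigned int put_index;

        /* Validate the pointers first ... */
        if (!hq || !dq)
            return -ENOMEM;

        /* ... and only then read through them. */
        put_index = hq->host_index;
        return (int)put_index;
    }

    int main(void)
    {
        struct queue hq = { .host_index = 3 }, dq = { 0 };
        printf("put_index=%d, null case=%d\n", rq_put(&hq, &dq), rq_put(NULL, &dq));
        return 0;
    }
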
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1d82eef4e1eb..b3db9dcc2619 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1938,11 +1938,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1938 "Timer for the VP[%d] has stopped\n", vha->vp_idx); 1938 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
1939 } 1939 }
1940 1940
1941 /* No pending activities shall be there on the vha now */
1942 if (ql2xextended_error_logging & ql_dbg_user)
1943 msleep(random32()%10); /* Just to see if something falls on
1944 * the net we have placed below */
1945
1946 BUG_ON(atomic_read(&vha->vref_count)); 1941 BUG_ON(atomic_read(&vha->vref_count));
1947 1942
1948 qla2x00_free_fcports(vha); 1943 qla2x00_free_fcports(vha);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 1626de52e32a..fbc305f1c87c 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -15,6 +15,7 @@
15 * | Mailbox commands | 0x115b | 0x111a-0x111b | 15 * | Mailbox commands | 0x115b | 0x111a-0x111b |
16 * | | | 0x112c-0x112e | 16 * | | | 0x112c-0x112e |
17 * | | | 0x113a | 17 * | | | 0x113a |
18 * | | | 0x1155-0x1158 |
18 * | Device Discovery | 0x2087 | 0x2020-0x2022, | 19 * | Device Discovery | 0x2087 | 0x2020-0x2022, |
19 * | | | 0x2016 | 20 * | | | 0x2016 |
20 * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b | 21 * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b |
@@ -401,7 +402,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
401 void *ring; 402 void *ring;
402 } aq, *aqp; 403 } aq, *aqp;
403 404
404 if (!ha->tgt.atio_q_length) 405 if (!ha->tgt.atio_ring)
405 return ptr; 406 return ptr;
406 407
407 num_queues = 1; 408 num_queues = 1;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index c6509911772b..65c5ff75936b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -863,7 +863,6 @@ typedef struct {
863#define MBX_1 BIT_1 863#define MBX_1 BIT_1
864#define MBX_0 BIT_0 864#define MBX_0 BIT_0
865 865
866#define RNID_TYPE_SET_VERSION 0x9
867#define RNID_TYPE_ASIC_TEMP 0xC 866#define RNID_TYPE_ASIC_TEMP 0xC
868 867
869/* 868/*
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index eb3ca21a7f17..b310fa97b545 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -358,9 +358,6 @@ extern int
358qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); 358qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
359 359
360extern int 360extern int
361qla2x00_set_driver_version(scsi_qla_host_t *, char *);
362
363extern int
364qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, 361qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
365 uint16_t, uint16_t, uint16_t, uint16_t); 362 uint16_t, uint16_t, uint16_t, uint16_t);
366 363
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index edf4d14a1335..b59203393cb2 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -619,8 +619,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
619 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) 619 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
620 qla24xx_read_fcp_prio_cfg(vha); 620 qla24xx_read_fcp_prio_cfg(vha);
621 621
622 qla2x00_set_driver_version(vha, QLA2XXX_VERSION);
623
624 return (rval); 622 return (rval);
625} 623}
626 624
@@ -1399,7 +1397,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1399 mq_size += ha->max_rsp_queues * 1397 mq_size += ha->max_rsp_queues *
1400 (rsp->length * sizeof(response_t)); 1398 (rsp->length * sizeof(response_t));
1401 } 1399 }
1402 if (ha->tgt.atio_q_length) 1400 if (ha->tgt.atio_ring)
1403 mq_size += ha->tgt.atio_q_length * sizeof(request_t); 1401 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
1404 /* Allocate memory for Fibre Channel Event Buffer. */ 1402 /* Allocate memory for Fibre Channel Event Buffer. */
1405 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 1403 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 186dd59ce4fa..43345af56431 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3866,64 +3866,6 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3866 return rval; 3866 return rval;
3867} 3867}
3868 3868
3869int
3870qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version)
3871{
3872 int rval;
3873 mbx_cmd_t mc;
3874 mbx_cmd_t *mcp = &mc;
3875 int len;
3876 uint16_t dwlen;
3877 uint8_t *str;
3878 dma_addr_t str_dma;
3879 struct qla_hw_data *ha = vha->hw;
3880
3881 if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha))
3882 return QLA_FUNCTION_FAILED;
3883
3884 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155,
3885 "Entered %s.\n", __func__);
3886
3887 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
3888 if (!str) {
3889 ql_log(ql_log_warn, vha, 0x1156,
3890 "Failed to allocate driver version param.\n");
3891 return QLA_MEMORY_ALLOC_FAILED;
3892 }
3893
3894 memcpy(str, "\x7\x3\x11\x0", 4);
3895 dwlen = str[0];
3896 len = dwlen * sizeof(uint32_t) - 4;
3897 memset(str + 4, 0, len);
3898 if (len > strlen(version))
3899 len = strlen(version);
3900 memcpy(str + 4, version, len);
3901
3902 mcp->mb[0] = MBC_SET_RNID_PARAMS;
3903 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
3904 mcp->mb[2] = MSW(LSD(str_dma));
3905 mcp->mb[3] = LSW(LSD(str_dma));
3906 mcp->mb[6] = MSW(MSD(str_dma));
3907 mcp->mb[7] = LSW(MSD(str_dma));
3908 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3909 mcp->in_mb = MBX_0;
3910 mcp->tov = MBX_TOV_SECONDS;
3911 mcp->flags = 0;
3912 rval = qla2x00_mailbox_command(vha, mcp);
3913
3914 if (rval != QLA_SUCCESS) {
3915 ql_dbg(ql_dbg_mbx, vha, 0x1157,
3916 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3917 } else {
3918 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158,
3919 "Done %s.\n", __func__);
3920 }
3921
3922 dma_pool_free(ha->s_dma_pool, str, str_dma);
3923
3924 return rval;
3925}
3926
3927static int 3869static int
3928qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 3870qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
3929{ 3871{
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 2b6e478d9e33..ec54036d1e12 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.04.00.08-k" 10#define QLA2XXX_VERSION "8.04.00.13-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 4 13#define QLA_DRIVER_MINOR_VER 4
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 86974471af68..2a32036a9404 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4112,6 +4112,10 @@ static int st_probe(struct device *dev)
4112 tpnt->disk = disk; 4112 tpnt->disk = disk;
4113 disk->private_data = &tpnt->driver; 4113 disk->private_data = &tpnt->driver;
4114 disk->queue = SDp->request_queue; 4114 disk->queue = SDp->request_queue;
4115 /* SCSI tape doesn't register this gendisk via add_disk(). Manually
4116 * take queue reference that release_disk() expects. */
4117 if (!blk_get_queue(disk->queue))
4118 goto out_put_disk;
4115 tpnt->driver = &st_template; 4119 tpnt->driver = &st_template;
4116 4120
4117 tpnt->device = SDp; 4121 tpnt->device = SDp;
@@ -4185,7 +4189,7 @@ static int st_probe(struct device *dev)
4185 idr_preload_end(); 4189 idr_preload_end();
4186 if (error < 0) { 4190 if (error < 0) {
4187 pr_warn("st: idr allocation failed: %d\n", error); 4191 pr_warn("st: idr allocation failed: %d\n", error);
4188 goto out_put_disk; 4192 goto out_put_queue;
4189 } 4193 }
4190 tpnt->index = error; 4194 tpnt->index = error;
4191 sprintf(disk->disk_name, "st%d", tpnt->index); 4195 sprintf(disk->disk_name, "st%d", tpnt->index);
@@ -4211,6 +4215,8 @@ out_remove_devs:
4211 spin_lock(&st_index_lock); 4215 spin_lock(&st_index_lock);
4212 idr_remove(&st_index_idr, tpnt->index); 4216 idr_remove(&st_index_idr, tpnt->index);
4213 spin_unlock(&st_index_lock); 4217 spin_unlock(&st_index_lock);
4218out_put_queue:
4219 blk_put_queue(disk->queue);
4214out_put_disk: 4220out_put_disk:
4215 put_disk(disk); 4221 put_disk(disk);
4216 kfree(tpnt); 4222 kfree(tpnt);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f80eee74a311..2be0de920d67 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -55,6 +55,7 @@ comment "SPI Master Controller Drivers"
55 55
56config SPI_ALTERA 56config SPI_ALTERA
57 tristate "Altera SPI Controller" 57 tristate "Altera SPI Controller"
58 depends on GENERIC_HARDIRQS
58 select SPI_BITBANG 59 select SPI_BITBANG
59 help 60 help
60 This is the driver for the Altera SPI Controller. 61 This is the driver for the Altera SPI Controller.
@@ -310,7 +311,7 @@ config SPI_PXA2XX_DMA
310 311
311config SPI_PXA2XX 312config SPI_PXA2XX
312 tristate "PXA2xx SSP SPI master" 313 tristate "PXA2xx SSP SPI master"
313 depends on ARCH_PXA || PCI || ACPI 314 depends on (ARCH_PXA || PCI || ACPI) && GENERIC_HARDIRQS
314 select PXA_SSP if ARCH_PXA 315 select PXA_SSP if ARCH_PXA
315 help 316 help
316 This enables using a PXA2xx or Sodaville SSP port as a SPI master 317 This enables using a PXA2xx or Sodaville SSP port as a SPI master
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 9578af782a77..d7df435d962e 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -152,7 +152,6 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
152static int bcm63xx_spi_setup(struct spi_device *spi) 152static int bcm63xx_spi_setup(struct spi_device *spi)
153{ 153{
154 struct bcm63xx_spi *bs; 154 struct bcm63xx_spi *bs;
155 int ret;
156 155
157 bs = spi_master_get_devdata(spi->master); 156 bs = spi_master_get_devdata(spi->master);
158 157
@@ -490,7 +489,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
490 default: 489 default:
491 dev_err(dev, "unsupported MSG_CTL width: %d\n", 490 dev_err(dev, "unsupported MSG_CTL width: %d\n",
492 bs->msg_ctl_width); 491 bs->msg_ctl_width);
493 goto out_clk_disable; 492 goto out_err;
494 } 493 }
495 494
496 /* Initialize hardware */ 495 /* Initialize hardware */
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 89480b281d74..3e490ee7f275 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -164,7 +164,7 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
164 164
165 for (i = count; i > 0; i--) { 165 for (i = count; i > 0; i--) {
166 data = tx_buf ? *tx_buf++ : 0; 166 data = tx_buf ? *tx_buf++ : 0;
167 if (len == EOFBYTE) 167 if (len == EOFBYTE && t->cs_change)
168 setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); 168 setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF);
169 out_8(&fifo->txdata_8, data); 169 out_8(&fifo->txdata_8, data);
170 len--; 170 len--;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 90b27a3508a6..810413883c79 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1168,7 +1168,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1168 1168
1169 master->dev.parent = &pdev->dev; 1169 master->dev.parent = &pdev->dev;
1170 master->dev.of_node = pdev->dev.of_node; 1170 master->dev.of_node = pdev->dev.of_node;
1171 ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev));
1172 /* the spi->mode bits understood by this driver: */ 1171 /* the spi->mode bits understood by this driver: */
1173 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; 1172 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
1174 1173
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index e862ab8853aa..4188b2faac5c 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -994,25 +994,30 @@ static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
994{ 994{
995 struct s3c64xx_spi_driver_data *sdd = data; 995 struct s3c64xx_spi_driver_data *sdd = data;
996 struct spi_master *spi = sdd->master; 996 struct spi_master *spi = sdd->master;
997 unsigned int val; 997 unsigned int val, clr = 0;
998 998
999 val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR); 999 val = readl(sdd->regs + S3C64XX_SPI_STATUS);
1000 1000
1001 val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR | 1001 if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
1002 S3C64XX_SPI_PND_RX_UNDERRUN_CLR | 1002 clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
1003 S3C64XX_SPI_PND_TX_OVERRUN_CLR |
1004 S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
1005
1006 writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR);
1007
1008 if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR)
1009 dev_err(&spi->dev, "RX overrun\n"); 1003 dev_err(&spi->dev, "RX overrun\n");
1010 if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR) 1004 }
1005 if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
1006 clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
1011 dev_err(&spi->dev, "RX underrun\n"); 1007 dev_err(&spi->dev, "RX underrun\n");
1012 if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR) 1008 }
1009 if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
1010 clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
1013 dev_err(&spi->dev, "TX overrun\n"); 1011 dev_err(&spi->dev, "TX overrun\n");
1014 if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR) 1012 }
1013 if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
1014 clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
1015 dev_err(&spi->dev, "TX underrun\n"); 1015 dev_err(&spi->dev, "TX underrun\n");
1016 }
1017
1018 /* Clear the pending irq by setting and then clearing it */
1019 writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
1020 writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
1016 1021
1017 return IRQ_HANDLED; 1022 return IRQ_HANDLED;
1018} 1023}
@@ -1036,9 +1041,13 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
1036 writel(0, regs + S3C64XX_SPI_MODE_CFG); 1041 writel(0, regs + S3C64XX_SPI_MODE_CFG);
1037 writel(0, regs + S3C64XX_SPI_PACKET_CNT); 1042 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
1038 1043
1039 /* Clear any irq pending bits */ 1044 /* Clear any irq pending bits, should set and clear the bits */
1040 writel(readl(regs + S3C64XX_SPI_PENDING_CLR), 1045 val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
1041 regs + S3C64XX_SPI_PENDING_CLR); 1046 S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
1047 S3C64XX_SPI_PND_TX_OVERRUN_CLR |
1048 S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
1049 writel(val, regs + S3C64XX_SPI_PENDING_CLR);
1050 writel(0, regs + S3C64XX_SPI_PENDING_CLR);
1042 1051
1043 writel(0, regs + S3C64XX_SPI_SWAP_CFG); 1052 writel(0, regs + S3C64XX_SPI_SWAP_CFG);
1044 1053
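
The s3c64xx hunks make the IRQ handler derive the error causes from the STATUS register instead of reading PENDING_CLR back, and acknowledge them by writing the clear bits and then writing zero, as the new comments state. A hedged sketch of that read-status / set-then-clear sequence against mock registers (bit positions invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Mock register file standing in for the memory-mapped SPI block. */
    static uint32_t REG_STATUS, REG_PENDING_CLR;

    #define ST_RX_OVERRUN   (1u << 5)   /* illustrative bit positions only */
    #define ST_TX_UNDERRUN  (1u << 6)
    #define PND_RX_OVERRUN  (1u << 0)
    #define PND_TX_UNDERRUN (1u << 1)

    static void spi_irq(void)
    {
        uint32_t val = REG_STATUS;      /* 1) read the cause from STATUS */
        uint32_t clr = 0;

        if (val & ST_RX_OVERRUN) {
            clr |= PND_RX_OVERRUN;
            puts("RX overrun");
        }
        if (val & ST_TX_UNDERRUN) {
            clr |= PND_TX_UNDERRUN;
            puts("TX underrun");
        }

        REG_PENDING_CLR = clr;          /* 2) set the pending-clear bits ... */
        REG_PENDING_CLR = 0;            /* 3) ... then write them back to zero */
    }

    int main(void)
    {
        REG_STATUS = ST_RX_OVERRUN;
        spi_irq();
        return 0;
    }
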
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index b8698b389ef3..a829563f4713 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -858,21 +858,6 @@ static int tegra_slink_setup(struct spi_device *spi)
858 return 0; 858 return 0;
859} 859}
860 860
861static int tegra_slink_prepare_transfer(struct spi_master *master)
862{
863 struct tegra_slink_data *tspi = spi_master_get_devdata(master);
864
865 return pm_runtime_get_sync(tspi->dev);
866}
867
868static int tegra_slink_unprepare_transfer(struct spi_master *master)
869{
870 struct tegra_slink_data *tspi = spi_master_get_devdata(master);
871
872 pm_runtime_put(tspi->dev);
873 return 0;
874}
875
876static int tegra_slink_transfer_one_message(struct spi_master *master, 861static int tegra_slink_transfer_one_message(struct spi_master *master,
877 struct spi_message *msg) 862 struct spi_message *msg)
878{ 863{
@@ -885,6 +870,12 @@ static int tegra_slink_transfer_one_message(struct spi_master *master,
885 870
886 msg->status = 0; 871 msg->status = 0;
887 msg->actual_length = 0; 872 msg->actual_length = 0;
873 ret = pm_runtime_get_sync(tspi->dev);
874 if (ret < 0) {
875 dev_err(tspi->dev, "runtime get failed: %d\n", ret);
876 goto done;
877 }
878
888 single_xfer = list_is_singular(&msg->transfers); 879 single_xfer = list_is_singular(&msg->transfers);
889 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 880 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
890 INIT_COMPLETION(tspi->xfer_completion); 881 INIT_COMPLETION(tspi->xfer_completion);
@@ -921,6 +912,8 @@ static int tegra_slink_transfer_one_message(struct spi_master *master,
921exit: 912exit:
922 tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); 913 tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
923 tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); 914 tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
915 pm_runtime_put(tspi->dev);
916done:
924 msg->status = ret; 917 msg->status = ret;
925 spi_finalize_current_message(master); 918 spi_finalize_current_message(master);
926 return ret; 919 return ret;
@@ -1148,9 +1141,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
1148 /* the spi->mode bits understood by this driver: */ 1141 /* the spi->mode bits understood by this driver: */
1149 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1142 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1150 master->setup = tegra_slink_setup; 1143 master->setup = tegra_slink_setup;
1151 master->prepare_transfer_hardware = tegra_slink_prepare_transfer;
1152 master->transfer_one_message = tegra_slink_transfer_one_message; 1144 master->transfer_one_message = tegra_slink_transfer_one_message;
1153 master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer;
1154 master->num_chipselect = MAX_CHIP_SELECT; 1145 master->num_chipselect = MAX_CHIP_SELECT;
1155 master->bus_num = -1; 1146 master->bus_num = -1;
1156 1147
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index f996c600eb8c..004b10f184d4 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -543,17 +543,16 @@ static void spi_pump_messages(struct kthread_work *work)
543 /* Lock queue and check for queue work */ 543 /* Lock queue and check for queue work */
544 spin_lock_irqsave(&master->queue_lock, flags); 544 spin_lock_irqsave(&master->queue_lock, flags);
545 if (list_empty(&master->queue) || !master->running) { 545 if (list_empty(&master->queue) || !master->running) {
546 if (master->busy && master->unprepare_transfer_hardware) { 546 if (!master->busy) {
547 ret = master->unprepare_transfer_hardware(master); 547 spin_unlock_irqrestore(&master->queue_lock, flags);
548 if (ret) { 548 return;
549 spin_unlock_irqrestore(&master->queue_lock, flags);
550 dev_err(&master->dev,
551 "failed to unprepare transfer hardware\n");
552 return;
553 }
554 } 549 }
555 master->busy = false; 550 master->busy = false;
556 spin_unlock_irqrestore(&master->queue_lock, flags); 551 spin_unlock_irqrestore(&master->queue_lock, flags);
552 if (master->unprepare_transfer_hardware &&
553 master->unprepare_transfer_hardware(master))
554 dev_err(&master->dev,
555 "failed to unprepare transfer hardware\n");
557 return; 556 return;
558 } 557 }
559 558
@@ -984,7 +983,7 @@ static void acpi_register_spi_devices(struct spi_master *master)
984 acpi_status status; 983 acpi_status status;
985 acpi_handle handle; 984 acpi_handle handle;
986 985
987 handle = ACPI_HANDLE(&master->dev); 986 handle = ACPI_HANDLE(master->dev.parent);
988 if (!handle) 987 if (!handle)
989 return; 988 return;
990 989
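
The spi.c rework clears master->busy under the queue lock but calls unprepare_transfer_hardware(), which may sleep in some drivers, only after the lock has been released. A hedged userspace sketch of the reordered idle path:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct master {
        pthread_mutex_t queue_lock;   /* stands in for the spinlock */
        bool busy;
        int (*unprepare_hw)(struct master *);
    };

    static int slow_unprepare(struct master *m) { (void)m; return 0; } /* may sleep */

    static void pump_messages_idle(struct master *m)
    {
        pthread_mutex_lock(&m->queue_lock);
        if (!m->busy) {
            pthread_mutex_unlock(&m->queue_lock);
            return;
        }
        m->busy = false;                            /* state change under the lock ... */
        pthread_mutex_unlock(&m->queue_lock);

        if (m->unprepare_hw && m->unprepare_hw(m))  /* ... sleeping callback after it */
            fprintf(stderr, "failed to unprepare transfer hardware\n");
    }

    int main(void)
    {
        struct master m = { PTHREAD_MUTEX_INITIALIZER, true, slow_unprepare };
        pump_messages_idle(&m);
        return 0;
    }
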
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 81a1fe661579..71a73ec5af8d 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -1483,7 +1483,7 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
1483 case TRIG_NONE: 1483 case TRIG_NONE:
1484 /* continous acquisition */ 1484 /* continous acquisition */
1485 devpriv->ai_continous = 1; 1485 devpriv->ai_continous = 1;
1486 devpriv->ai_sample_count = 0; 1486 devpriv->ai_sample_count = 1;
1487 break; 1487 break;
1488 } 1488 }
1489 1489
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
index 73582705e8c5..5c3714530961 100644
--- a/drivers/staging/zcache/Kconfig
+++ b/drivers/staging/zcache/Kconfig
@@ -15,7 +15,7 @@ config RAMSTER
15 depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE=y 15 depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE=y
16 depends on NET 16 depends on NET
17 # must ensure struct page is 8-byte aligned 17 # must ensure struct page is 8-byte aligned
18 select HAVE_ALIGNED_STRUCT_PAGE if !64_BIT 18 select HAVE_ALIGNED_STRUCT_PAGE if !64BIT
19 default n 19 default n
20 help 20 help
21 RAMster allows RAM on other machines in a cluster to be utilized 21 RAMster allows RAM on other machines in a cluster to be utilized
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index ff1c5ee352cb..cbe48ab41745 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -409,6 +409,7 @@ static inline int core_alua_state_standby(
409 case REPORT_LUNS: 409 case REPORT_LUNS:
410 case RECEIVE_DIAGNOSTIC: 410 case RECEIVE_DIAGNOSTIC:
411 case SEND_DIAGNOSTIC: 411 case SEND_DIAGNOSTIC:
412 return 0;
412 case MAINTENANCE_IN: 413 case MAINTENANCE_IN:
413 switch (cdb[1] & 0x1f) { 414 switch (cdb[1] & 0x1f) {
414 case MI_REPORT_TARGET_PGS: 415 case MI_REPORT_TARGET_PGS:
@@ -451,6 +452,7 @@ static inline int core_alua_state_unavailable(
451 switch (cdb[0]) { 452 switch (cdb[0]) {
452 case INQUIRY: 453 case INQUIRY:
453 case REPORT_LUNS: 454 case REPORT_LUNS:
455 return 0;
454 case MAINTENANCE_IN: 456 case MAINTENANCE_IN:
455 switch (cdb[1] & 0x1f) { 457 switch (cdb[1] & 0x1f) {
456 case MI_REPORT_TARGET_PGS: 458 case MI_REPORT_TARGET_PGS:
@@ -491,6 +493,7 @@ static inline int core_alua_state_transition(
491 switch (cdb[0]) { 493 switch (cdb[0]) {
492 case INQUIRY: 494 case INQUIRY:
493 case REPORT_LUNS: 495 case REPORT_LUNS:
496 return 0;
494 case MAINTENANCE_IN: 497 case MAINTENANCE_IN:
495 switch (cdb[1] & 0x1f) { 498 switch (cdb[1] & 0x1f) {
496 case MI_REPORT_TARGET_PGS: 499 case MI_REPORT_TARGET_PGS:
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 2030b608136d..3243ea790eab 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1139,8 +1139,10 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
1139 return ret; 1139 return ret;
1140 1140
1141 ret = target_check_reservation(cmd); 1141 ret = target_check_reservation(cmd);
1142 if (ret) 1142 if (ret) {
1143 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1143 return ret; 1144 return ret;
1145 }
1144 1146
1145 ret = dev->transport->parse_cdb(cmd); 1147 ret = dev->transport->parse_cdb(cmd);
1146 if (ret) 1148 if (ret)
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 484b6a3c9b03..302909ccf183 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -2643,9 +2643,9 @@ static int mxser_probe(struct pci_dev *pdev,
2643 mxvar_sdriver, brd->idx + i, &pdev->dev); 2643 mxvar_sdriver, brd->idx + i, &pdev->dev);
2644 if (IS_ERR(tty_dev)) { 2644 if (IS_ERR(tty_dev)) {
2645 retval = PTR_ERR(tty_dev); 2645 retval = PTR_ERR(tty_dev);
2646 for (i--; i >= 0; i--) 2646 for (; i > 0; i--)
2647 tty_unregister_device(mxvar_sdriver, 2647 tty_unregister_device(mxvar_sdriver,
2648 brd->idx + i); 2648 brd->idx + i - 1);
2649 goto err_relbrd; 2649 goto err_relbrd;
2650 } 2650 }
2651 } 2651 }
@@ -2751,9 +2751,9 @@ static int __init mxser_module_init(void)
2751 tty_dev = tty_port_register_device(&brd->ports[i].port, 2751 tty_dev = tty_port_register_device(&brd->ports[i].port,
2752 mxvar_sdriver, brd->idx + i, NULL); 2752 mxvar_sdriver, brd->idx + i, NULL);
2753 if (IS_ERR(tty_dev)) { 2753 if (IS_ERR(tty_dev)) {
2754 for (i--; i >= 0; i--) 2754 for (; i > 0; i--)
2755 tty_unregister_device(mxvar_sdriver, 2755 tty_unregister_device(mxvar_sdriver,
2756 brd->idx + i); 2756 brd->idx + i - 1);
2757 for (i = 0; i < brd->info->nports; i++) 2757 for (i = 0; i < brd->info->nports; i++)
2758 tty_port_destroy(&brd->ports[i].port); 2758 tty_port_destroy(&brd->ports[i].port);
2759 free_irq(brd->irq, brd); 2759 free_irq(brd->irq, brd);
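
Both mxser cleanup loops become "for (; i > 0; i--) ... brd->idx + i - 1", which unwinds the same set of devices as before but also stays correct if the index variable is (or later becomes) an unsigned type, where "i >= 0" would never turn false. A tiny C illustration:

    #include <stdio.h>

    static void unregister_dev(unsigned int idx) { printf("unregister %u\n", idx); }

    /* Unwind devices base .. base+i-1 after registration number i failed.
     * Safe for unsigned i: the loop condition is i > 0, not i >= 0. */
    static void unwind(unsigned int base, unsigned int i)
    {
        for (; i > 0; i--)
            unregister_dev(base + i - 1);
    }

    int main(void)
    {
        unwind(8, 3);   /* prints 10, 9, 8 */
        return 0;
    }
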
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250_core.c
index cf6a5383748a..35f9c96aada9 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -3418,6 +3418,7 @@ MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
3418#endif 3418#endif
3419MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR); 3419MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
3420 3420
3421#ifdef CONFIG_SERIAL_8250_DEPRECATED_OPTIONS
3421#ifndef MODULE 3422#ifndef MODULE
3422/* This module was renamed to 8250_core in 3.7. Keep the old "8250" name 3423/* This module was renamed to 8250_core in 3.7. Keep the old "8250" name
3423 * working as well for the module options so we don't break people. We 3424 * working as well for the module options so we don't break people. We
@@ -3432,7 +3433,7 @@ MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
3432static void __used s8250_options(void) 3433static void __used s8250_options(void)
3433{ 3434{
3434#undef MODULE_PARAM_PREFIX 3435#undef MODULE_PARAM_PREFIX
3435#define MODULE_PARAM_PREFIX "8250." 3436#define MODULE_PARAM_PREFIX "8250_core."
3436 3437
3437 module_param_cb(share_irqs, &param_ops_uint, &share_irqs, 0644); 3438 module_param_cb(share_irqs, &param_ops_uint, &share_irqs, 0644);
3438 module_param_cb(nr_uarts, &param_ops_uint, &nr_uarts, 0644); 3439 module_param_cb(nr_uarts, &param_ops_uint, &nr_uarts, 0644);
@@ -3444,5 +3445,6 @@ static void __used s8250_options(void)
3444#endif 3445#endif
3445} 3446}
3446#else 3447#else
3447MODULE_ALIAS("8250"); 3448MODULE_ALIAS("8250_core");
3449#endif
3448#endif 3450#endif
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index aa76825229dc..26e3a97ab157 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1554,6 +1554,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
1554#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001 1554#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
1555#define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d 1555#define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
1556#define PCI_VENDOR_ID_WCH 0x4348 1556#define PCI_VENDOR_ID_WCH 0x4348
1557#define PCI_DEVICE_ID_WCH_CH352_2S 0x3253
1557#define PCI_DEVICE_ID_WCH_CH353_4S 0x3453 1558#define PCI_DEVICE_ID_WCH_CH353_4S 0x3453
1558#define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046 1559#define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046
1559#define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053 1560#define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053
@@ -2172,6 +2173,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
2172 .subdevice = PCI_ANY_ID, 2173 .subdevice = PCI_ANY_ID,
2173 .setup = pci_wch_ch353_setup, 2174 .setup = pci_wch_ch353_setup,
2174 }, 2175 },
2176 /* WCH CH352 2S card (16550 clone) */
2177 {
2178 .vendor = PCI_VENDOR_ID_WCH,
2179 .device = PCI_DEVICE_ID_WCH_CH352_2S,
2180 .subvendor = PCI_ANY_ID,
2181 .subdevice = PCI_ANY_ID,
2182 .setup = pci_wch_ch353_setup,
2183 },
2175 /* 2184 /*
2176 * ASIX devices with FIFO bug 2185 * ASIX devices with FIFO bug
2177 */ 2186 */
@@ -4870,6 +4879,10 @@ static struct pci_device_id serial_pci_tbl[] = {
4870 PCI_ANY_ID, PCI_ANY_ID, 4879 PCI_ANY_ID, PCI_ANY_ID,
4871 0, 0, pbn_b0_bt_2_115200 }, 4880 0, 0, pbn_b0_bt_2_115200 },
4872 4881
4882 { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH352_2S,
4883 PCI_ANY_ID, PCI_ANY_ID,
4884 0, 0, pbn_b0_bt_2_115200 },
4885
4873 /* 4886 /*
4874 * Commtech, Inc. Fastcom adapters 4887 * Commtech, Inc. Fastcom adapters
4875 */ 4888 */
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index b3455a970a1d..35d9ab95c5cb 100644
--- a/drivers/tty/serial/8250/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
@@ -429,7 +429,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
429{ 429{
430 struct uart_8250_port uart; 430 struct uart_8250_port uart;
431 int ret, line, flags = dev_id->driver_data; 431 int ret, line, flags = dev_id->driver_data;
432 struct resource *res = NULL;
433 432
434 if (flags & UNKNOWN_DEV) { 433 if (flags & UNKNOWN_DEV) {
435 ret = serial_pnp_guess_board(dev); 434 ret = serial_pnp_guess_board(dev);
@@ -440,12 +439,11 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
440 memset(&uart, 0, sizeof(uart)); 439 memset(&uart, 0, sizeof(uart));
441 if (pnp_irq_valid(dev, 0)) 440 if (pnp_irq_valid(dev, 0))
442 uart.port.irq = pnp_irq(dev, 0); 441 uart.port.irq = pnp_irq(dev, 0);
443 if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) 442 if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) {
444 res = pnp_get_resource(dev, IORESOURCE_IO, 2); 443 uart.port.iobase = pnp_port_start(dev, 2);
445 else if (pnp_port_valid(dev, 0)) 444 uart.port.iotype = UPIO_PORT;
446 res = pnp_get_resource(dev, IORESOURCE_IO, 0); 445 } else if (pnp_port_valid(dev, 0)) {
447 if (pnp_resource_enabled(res)) { 446 uart.port.iobase = pnp_port_start(dev, 0);
448 uart.port.iobase = res->start;
449 uart.port.iotype = UPIO_PORT; 447 uart.port.iotype = UPIO_PORT;
450 } else if (pnp_mem_valid(dev, 0)) { 448 } else if (pnp_mem_valid(dev, 0)) {
451 uart.port.mapbase = pnp_mem_start(dev, 0); 449 uart.port.mapbase = pnp_mem_start(dev, 0);
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 2ef9537bcb2c..80fe91e64a52 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -33,6 +33,23 @@ config SERIAL_8250
33 Most people will say Y or M here, so that they can use serial mice, 33 Most people will say Y or M here, so that they can use serial mice,
34 modems and similar devices connecting to the standard serial ports. 34 modems and similar devices connecting to the standard serial ports.
35 35
36config SERIAL_8250_DEPRECATED_OPTIONS
37 bool "Support 8250_core.* kernel options (DEPRECATED)"
38 depends on SERIAL_8250
39 default y
40 ---help---
41 In 3.7 we renamed 8250 to 8250_core by mistake, so now we have to
42 accept kernel parameters in both forms like 8250_core.nr_uarts=4 and
43 8250.nr_uarts=4. We now renamed the module back to 8250, but if
44 anybody noticed in 3.7 and changed their userspace we still have to
45 keep the 8350_core.* options around until they revert the changes
46 they already did.
47
48 If 8250 is built as a module, this adds 8250_core alias instead.
49
50 If you did not notice yet and/or you have userspace from pre-3.7, it
51 is safe (and recommended) to say N here.
52
36config SERIAL_8250_PNP 53config SERIAL_8250_PNP
37 bool "8250/16550 PNP device support" if EXPERT 54 bool "8250/16550 PNP device support" if EXPERT
38 depends on SERIAL_8250 && PNP 55 depends on SERIAL_8250 && PNP
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index a23838a4d535..36d68d054307 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -2,10 +2,10 @@
2# Makefile for the 8250 serial device drivers. 2# Makefile for the 8250 serial device drivers.
3# 3#
4 4
5obj-$(CONFIG_SERIAL_8250) += 8250_core.o 5obj-$(CONFIG_SERIAL_8250) += 8250.o
68250_core-y := 8250.o 68250-y := 8250_core.o
78250_core-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o 78250-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
88250_core-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o 88250-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o
9obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o 9obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
10obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o 10obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
11obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o 11obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index d4a7c241b751..3467462869ce 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -158,7 +158,7 @@ struct atmel_uart_port {
158}; 158};
159 159
160static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART]; 160static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
161static unsigned long atmel_ports_in_use; 161static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
162 162
163#ifdef SUPPORT_SYSRQ 163#ifdef SUPPORT_SYSRQ
164static struct console atmel_console; 164static struct console atmel_console;
@@ -1769,15 +1769,14 @@ static int atmel_serial_probe(struct platform_device *pdev)
1769 if (ret < 0) 1769 if (ret < 0)
1770 /* port id not found in platform data nor device-tree aliases: 1770 /* port id not found in platform data nor device-tree aliases:
1771 * auto-enumerate it */ 1771 * auto-enumerate it */
1772 ret = find_first_zero_bit(&atmel_ports_in_use, 1772 ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
1773 sizeof(atmel_ports_in_use));
1774 1773
1775 if (ret > ATMEL_MAX_UART) { 1774 if (ret >= ATMEL_MAX_UART) {
1776 ret = -ENODEV; 1775 ret = -ENODEV;
1777 goto err; 1776 goto err;
1778 } 1777 }
1779 1778
1780 if (test_and_set_bit(ret, &atmel_ports_in_use)) { 1779 if (test_and_set_bit(ret, atmel_ports_in_use)) {
1781 /* port already in use */ 1780 /* port already in use */
1782 ret = -EBUSY; 1781 ret = -EBUSY;
1783 goto err; 1782 goto err;
@@ -1857,7 +1856,7 @@ static int atmel_serial_remove(struct platform_device *pdev)
1857 1856
1858 /* "port" is allocated statically, so we shouldn't free it */ 1857 /* "port" is allocated statically, so we shouldn't free it */
1859 1858
1860 clear_bit(port->line, &atmel_ports_in_use); 1859 clear_bit(port->line, atmel_ports_in_use);
1861 1860
1862 clk_put(atmel_port->clk); 1861 clk_put(atmel_port->clk);
1863 1862
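
The atmel_serial hunks turn atmel_ports_in_use into a real bitmap so it can cover more than BITS_PER_LONG ports, pass find_first_zero_bit() a size in bits (ATMEL_MAX_UART) rather than a sizeof() byte count, and tighten the bounds check from ">" to ">=". A hedged userspace sketch of the port-claiming logic with a toy bitmap helper:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>

    #define MAX_UART 7
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define BITMAP_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static unsigned long ports_in_use[BITMAP_WORDS(MAX_UART)];

    /* Toy find_first_zero_bit: 'size' is a count of bits, not bytes. */
    static unsigned int find_first_zero_bit(const unsigned long *map, unsigned int size)
    {
        for (unsigned int bit = 0; bit < size; bit++)
            if (!(map[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG))))
                return bit;
        return size;
    }

    static int claim_port(void)
    {
        unsigned int id = find_first_zero_bit(ports_in_use, MAX_UART);

        if (id >= MAX_UART)                 /* '>=' so id == MAX_UART is rejected */
            return -ENODEV;
        ports_in_use[id / BITS_PER_LONG] |= 1UL << (id % BITS_PER_LONG);
        return (int)id;
    }

    int main(void)
    {
        int first = claim_port();
        int second = claim_port();
        printf("claimed ports %d and %d\n", first, second);
        return 0;
    }
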
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 4dc41408ecb7..30d4f7a783cd 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -886,6 +886,17 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
886 serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR); 886 serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
887 /* FIFO ENABLE, DMA MODE */ 887 /* FIFO ENABLE, DMA MODE */
888 888
889 up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
890 /*
891 * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK
 892 * enables a granularity of 1 for the RX TRIGGER
 893 * level. Together with an RX FIFO trigger level
 894 * of 1 (rather than the 16 characters noted below)
 895 * and TLR[3:0] set to zero, this results in an RX
 896 * FIFO threshold of 1 character instead of the 16
 897 * noted in the comment below.
898 */
899
889 /* Set receive FIFO threshold to 16 characters and 900 /* Set receive FIFO threshold to 16 characters and
890 * transmit FIFO threshold to 16 spaces 901 * transmit FIFO threshold to 16 spaces
891 */ 902 */
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index ba451c7209fc..f36bbba1ac8b 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -578,6 +578,8 @@ static int xuartps_startup(struct uart_port *port)
578 /* Receive Timeout register is enabled with value of 10 */ 578 /* Receive Timeout register is enabled with value of 10 */
579 xuartps_writel(10, XUARTPS_RXTOUT_OFFSET); 579 xuartps_writel(10, XUARTPS_RXTOUT_OFFSET);
580 580
581 /* Clear out any pending interrupts before enabling them */
582 xuartps_writel(xuartps_readl(XUARTPS_ISR_OFFSET), XUARTPS_ISR_OFFSET);
581 583
582 /* Set the Interrupt Registers with desired interrupts */ 584 /* Set the Interrupt Registers with desired interrupts */
583 xuartps_writel(XUARTPS_IXR_TXEMPTY | XUARTPS_IXR_PARITY | 585 xuartps_writel(XUARTPS_IXR_TXEMPTY | XUARTPS_IXR_PARITY |
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index e4ca345873c3..d7799deacb21 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -93,7 +93,7 @@ vcs_poll_data_free(struct vcs_poll_data *poll)
93static struct vcs_poll_data * 93static struct vcs_poll_data *
94vcs_poll_data_get(struct file *file) 94vcs_poll_data_get(struct file *file)
95{ 95{
96 struct vcs_poll_data *poll = file->private_data; 96 struct vcs_poll_data *poll = file->private_data, *kill = NULL;
97 97
98 if (poll) 98 if (poll)
99 return poll; 99 return poll;
@@ -122,10 +122,12 @@ vcs_poll_data_get(struct file *file)
122 file->private_data = poll; 122 file->private_data = poll;
123 } else { 123 } else {
124 /* someone else raced ahead of us */ 124 /* someone else raced ahead of us */
125 vcs_poll_data_free(poll); 125 kill = poll;
126 poll = file->private_data; 126 poll = file->private_data;
127 } 127 }
128 spin_unlock(&file->f_lock); 128 spin_unlock(&file->f_lock);
129 if (kill)
130 vcs_poll_data_free(kill);
129 131
130 return poll; 132 return poll;
131} 133}
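
The vc_screen change above keeps the race handling but moves the free of the losing allocation outside the spinlock: the loser is remembered in "kill" and released only after f_lock is dropped, presumably because the free path does work that should not run under a spinlock. A rough userspace sketch of the same pattern, with a pthread mutex standing in for file->f_lock and illustrative names (struct poll_data, install_poll_data):

#include <pthread.h>
#include <stdlib.h>

struct poll_data { int dummy; };                    /* stand-in for struct vcs_poll_data */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct poll_data *shared;                    /* plays file->private_data */

static struct poll_data *install_poll_data(void)
{
	struct poll_data *poll = calloc(1, sizeof(*poll));
	struct poll_data *kill = NULL;

	if (!poll)
		return NULL;

	pthread_mutex_lock(&lock);
	if (!shared) {
		shared = poll;                      /* we won the race */
	} else {
		kill = poll;                        /* someone else raced ahead of us */
		poll = shared;
	}
	pthread_mutex_unlock(&lock);

	free(kill);                                 /* never free while holding the lock */
	return poll;
}

int main(void)
{
	struct poll_data *a = install_poll_data();
	struct poll_data *b = install_poll_data();  /* returns the already-installed object */

	return (a && a == b) ? 0 : 1;
}
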
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 99b34a30354f..f9ec44cbb82f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2412,6 +2412,14 @@ int usb_hcd_is_primary_hcd(struct usb_hcd *hcd)
2412} 2412}
2413EXPORT_SYMBOL_GPL(usb_hcd_is_primary_hcd); 2413EXPORT_SYMBOL_GPL(usb_hcd_is_primary_hcd);
2414 2414
2415int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1)
2416{
2417 if (!hcd->driver->find_raw_port_number)
2418 return port1;
2419
2420 return hcd->driver->find_raw_port_number(hcd, port1);
2421}
2422
2415static int usb_hcd_request_irqs(struct usb_hcd *hcd, 2423static int usb_hcd_request_irqs(struct usb_hcd *hcd,
2416 unsigned int irqnum, unsigned long irqflags) 2424 unsigned int irqnum, unsigned long irqflags)
2417{ 2425{
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 797f9d514732..65d4e55552c6 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -67,7 +67,6 @@ static void usb_port_device_release(struct device *dev)
67{ 67{
68 struct usb_port *port_dev = to_usb_port(dev); 68 struct usb_port *port_dev = to_usb_port(dev);
69 69
70 dev_pm_qos_hide_flags(dev);
71 kfree(port_dev); 70 kfree(port_dev);
72} 71}
73 72
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index b6f4bad3f756..255c14464bf2 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -15,6 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/acpi.h> 16#include <linux/acpi.h>
17#include <linux/pci.h> 17#include <linux/pci.h>
18#include <linux/usb/hcd.h>
18#include <acpi/acpi_bus.h> 19#include <acpi/acpi_bus.h>
19 20
20#include "usb.h" 21#include "usb.h"
@@ -188,8 +189,13 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
188 * connected to. 189 * connected to.
189 */ 190 */
190 if (!udev->parent) { 191 if (!udev->parent) {
191 *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev), 192 struct usb_hcd *hcd = bus_to_hcd(udev->bus);
193 int raw_port_num;
194
195 raw_port_num = usb_hcd_find_raw_port_number(hcd,
192 port_num); 196 port_num);
197 *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev),
198 raw_port_num);
193 if (!*handle) 199 if (!*handle)
194 return -ENODEV; 200 return -ENODEV;
195 } else { 201 } else {
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 5a0c541daf89..c7525b1cad74 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -145,6 +145,7 @@ config USB_LPC32XX
145 tristate "LPC32XX USB Peripheral Controller" 145 tristate "LPC32XX USB Peripheral Controller"
146 depends on ARCH_LPC32XX 146 depends on ARCH_LPC32XX
147 select USB_ISP1301 147 select USB_ISP1301
148 select USB_OTG_UTILS
148 help 149 help
149 This option selects the USB device controller in the LPC32xx SoC. 150 This option selects the USB device controller in the LPC32xx SoC.
150 151
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index b476daf49f6f..010f686d8881 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1214,6 +1214,7 @@ itd_urb_transaction (
1214 1214
1215 memset (itd, 0, sizeof *itd); 1215 memset (itd, 0, sizeof *itd);
1216 itd->itd_dma = itd_dma; 1216 itd->itd_dma = itd_dma;
1217 itd->frame = 9999; /* an invalid value */
1217 list_add (&itd->itd_list, &sched->td_list); 1218 list_add (&itd->itd_list, &sched->td_list);
1218 } 1219 }
1219 spin_unlock_irqrestore (&ehci->lock, flags); 1220 spin_unlock_irqrestore (&ehci->lock, flags);
@@ -1915,6 +1916,7 @@ sitd_urb_transaction (
1915 1916
1916 memset (sitd, 0, sizeof *sitd); 1917 memset (sitd, 0, sizeof *sitd);
1917 sitd->sitd_dma = sitd_dma; 1918 sitd->sitd_dma = sitd_dma;
1919 sitd->frame = 9999; /* an invalid value */
1918 list_add (&sitd->sitd_list, &iso_sched->td_list); 1920 list_add (&sitd->sitd_list, &iso_sched->td_list);
1919 } 1921 }
1920 1922
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 35616ffbe3ae..6dc238c592bc 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1022,44 +1022,24 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
1022 * is attached to (or the roothub port its ancestor hub is attached to). All we 1022 * is attached to (or the roothub port its ancestor hub is attached to). All we
1023 * know is the index of that port under either the USB 2.0 or the USB 3.0 1023 * know is the index of that port under either the USB 2.0 or the USB 3.0
1024 * roothub, but that doesn't give us the real index into the HW port status 1024 * roothub, but that doesn't give us the real index into the HW port status
1025 * registers. Scan through the xHCI roothub port array, looking for the Nth 1025 * registers. Call xhci_find_raw_port_number() to get real index.
1026 * entry of the correct port speed. Return the port number of that entry.
1027 */ 1026 */
1028static u32 xhci_find_real_port_number(struct xhci_hcd *xhci, 1027static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
1029 struct usb_device *udev) 1028 struct usb_device *udev)
1030{ 1029{
1031 struct usb_device *top_dev; 1030 struct usb_device *top_dev;
1032 unsigned int num_similar_speed_ports; 1031 struct usb_hcd *hcd;
1033 unsigned int faked_port_num; 1032
1034 int i; 1033 if (udev->speed == USB_SPEED_SUPER)
1034 hcd = xhci->shared_hcd;
1035 else
1036 hcd = xhci->main_hcd;
1035 1037
1036 for (top_dev = udev; top_dev->parent && top_dev->parent->parent; 1038 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1037 top_dev = top_dev->parent) 1039 top_dev = top_dev->parent)
1038 /* Found device below root hub */; 1040 /* Found device below root hub */;
1039 faked_port_num = top_dev->portnum;
1040 for (i = 0, num_similar_speed_ports = 0;
1041 i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
1042 u8 port_speed = xhci->port_array[i];
1043
1044 /*
1045 * Skip ports that don't have known speeds, or have duplicate
1046 * Extended Capabilities port speed entries.
1047 */
1048 if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
1049 continue;
1050 1041
1051 /* 1042 return xhci_find_raw_port_number(hcd, top_dev->portnum);
1052 * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
1053 * 1.1 ports are under the USB 2.0 hub. If the port speed
1054 * matches the device speed, it's a similar speed port.
1055 */
1056 if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER))
1057 num_similar_speed_ports++;
1058 if (num_similar_speed_ports == faked_port_num)
1059 /* Roothub ports are numbered from 1 to N */
1060 return i+1;
1061 }
1062 return 0;
1063} 1043}
1064 1044
1065/* Setup an xHCI virtual device for a Set Address command */ 1045/* Setup an xHCI virtual device for a Set Address command */
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index af259e0ec172..1a30c380043c 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -313,6 +313,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
313 .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm, 313 .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
314 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout, 314 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
315 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, 315 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
316 .find_raw_port_number = xhci_find_raw_port_number,
316}; 317};
317 318
318/*-------------------------------------------------------------------------*/ 319/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 882875465301..1969c001b3f9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1599,14 +1599,20 @@ static void handle_port_status(struct xhci_hcd *xhci,
1599 max_ports = HCS_MAX_PORTS(xhci->hcs_params1); 1599 max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1600 if ((port_id <= 0) || (port_id > max_ports)) { 1600 if ((port_id <= 0) || (port_id > max_ports)) {
1601 xhci_warn(xhci, "Invalid port id %d\n", port_id); 1601 xhci_warn(xhci, "Invalid port id %d\n", port_id);
1602 bogus_port_status = true; 1602 inc_deq(xhci, xhci->event_ring);
1603 goto cleanup; 1603 return;
1604 } 1604 }
1605 1605
1606 /* Figure out which usb_hcd this port is attached to: 1606 /* Figure out which usb_hcd this port is attached to:
1607 * is it a USB 3.0 port or a USB 2.0/1.1 port? 1607 * is it a USB 3.0 port or a USB 2.0/1.1 port?
1608 */ 1608 */
1609 major_revision = xhci->port_array[port_id - 1]; 1609 major_revision = xhci->port_array[port_id - 1];
1610
1611 /* Find the right roothub. */
1612 hcd = xhci_to_hcd(xhci);
1613 if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
1614 hcd = xhci->shared_hcd;
1615
1610 if (major_revision == 0) { 1616 if (major_revision == 0) {
1611 xhci_warn(xhci, "Event for port %u not in " 1617 xhci_warn(xhci, "Event for port %u not in "
1612 "Extended Capabilities, ignoring.\n", 1618 "Extended Capabilities, ignoring.\n",
@@ -1629,10 +1635,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
1629 * into the index into the ports on the correct split roothub, and the 1635 * into the index into the ports on the correct split roothub, and the
1630 * correct bus_state structure. 1636 * correct bus_state structure.
1631 */ 1637 */
1632 /* Find the right roothub. */
1633 hcd = xhci_to_hcd(xhci);
1634 if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
1635 hcd = xhci->shared_hcd;
1636 bus_state = &xhci->bus_state[hcd_index(hcd)]; 1638 bus_state = &xhci->bus_state[hcd_index(hcd)];
1637 if (hcd->speed == HCD_USB3) 1639 if (hcd->speed == HCD_USB3)
1638 port_array = xhci->usb3_ports; 1640 port_array = xhci->usb3_ports;
@@ -2027,8 +2029,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
2027 if (event_trb != ep_ring->dequeue && 2029 if (event_trb != ep_ring->dequeue &&
2028 event_trb != td->last_trb) 2030 event_trb != td->last_trb)
2029 td->urb->actual_length = 2031 td->urb->actual_length =
2030 td->urb->transfer_buffer_length 2032 td->urb->transfer_buffer_length -
2031 - TRB_LEN(le32_to_cpu(event->transfer_len)); 2033 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2032 else 2034 else
2033 td->urb->actual_length = 0; 2035 td->urb->actual_length = 0;
2034 2036
@@ -2060,7 +2062,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
2060 /* Maybe the event was for the data stage? */ 2062 /* Maybe the event was for the data stage? */
2061 td->urb->actual_length = 2063 td->urb->actual_length =
2062 td->urb->transfer_buffer_length - 2064 td->urb->transfer_buffer_length -
2063 TRB_LEN(le32_to_cpu(event->transfer_len)); 2065 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2064 xhci_dbg(xhci, "Waiting for status " 2066 xhci_dbg(xhci, "Waiting for status "
2065 "stage event\n"); 2067 "stage event\n");
2066 return 0; 2068 return 0;
@@ -2096,7 +2098,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2096 /* handle completion code */ 2098 /* handle completion code */
2097 switch (trb_comp_code) { 2099 switch (trb_comp_code) {
2098 case COMP_SUCCESS: 2100 case COMP_SUCCESS:
2099 if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { 2101 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
2100 frame->status = 0; 2102 frame->status = 0;
2101 break; 2103 break;
2102 } 2104 }
@@ -2141,7 +2143,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2141 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); 2143 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2142 } 2144 }
2143 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 2145 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2144 TRB_LEN(le32_to_cpu(event->transfer_len)); 2146 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2145 2147
2146 if (trb_comp_code != COMP_STOP_INVAL) { 2148 if (trb_comp_code != COMP_STOP_INVAL) {
2147 frame->actual_length = len; 2149 frame->actual_length = len;
@@ -2199,7 +2201,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2199 case COMP_SUCCESS: 2201 case COMP_SUCCESS:
2200 /* Double check that the HW transferred everything. */ 2202 /* Double check that the HW transferred everything. */
2201 if (event_trb != td->last_trb || 2203 if (event_trb != td->last_trb ||
2202 TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 2204 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2203 xhci_warn(xhci, "WARN Successful completion " 2205 xhci_warn(xhci, "WARN Successful completion "
2204 "on short TX\n"); 2206 "on short TX\n");
2205 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 2207 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
@@ -2227,18 +2229,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2227 "%d bytes untransferred\n", 2229 "%d bytes untransferred\n",
2228 td->urb->ep->desc.bEndpointAddress, 2230 td->urb->ep->desc.bEndpointAddress,
2229 td->urb->transfer_buffer_length, 2231 td->urb->transfer_buffer_length,
2230 TRB_LEN(le32_to_cpu(event->transfer_len))); 2232 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2231 /* Fast path - was this the last TRB in the TD for this URB? */ 2233 /* Fast path - was this the last TRB in the TD for this URB? */
2232 if (event_trb == td->last_trb) { 2234 if (event_trb == td->last_trb) {
2233 if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 2235 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2234 td->urb->actual_length = 2236 td->urb->actual_length =
2235 td->urb->transfer_buffer_length - 2237 td->urb->transfer_buffer_length -
2236 TRB_LEN(le32_to_cpu(event->transfer_len)); 2238 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2237 if (td->urb->transfer_buffer_length < 2239 if (td->urb->transfer_buffer_length <
2238 td->urb->actual_length) { 2240 td->urb->actual_length) {
2239 xhci_warn(xhci, "HC gave bad length " 2241 xhci_warn(xhci, "HC gave bad length "
2240 "of %d bytes left\n", 2242 "of %d bytes left\n",
2241 TRB_LEN(le32_to_cpu(event->transfer_len))); 2243 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2242 td->urb->actual_length = 0; 2244 td->urb->actual_length = 0;
2243 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 2245 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2244 *status = -EREMOTEIO; 2246 *status = -EREMOTEIO;
@@ -2280,7 +2282,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2280 if (trb_comp_code != COMP_STOP_INVAL) 2282 if (trb_comp_code != COMP_STOP_INVAL)
2281 td->urb->actual_length += 2283 td->urb->actual_length +=
2282 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 2284 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2283 TRB_LEN(le32_to_cpu(event->transfer_len)); 2285 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2284 } 2286 }
2285 2287
2286 return finish_td(xhci, td, event_trb, event, ep, status, false); 2288 return finish_td(xhci, td, event_trb, event, ep, status, false);
@@ -2368,7 +2370,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2368 * transfer type 2370 * transfer type
2369 */ 2371 */
2370 case COMP_SUCCESS: 2372 case COMP_SUCCESS:
2371 if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) 2373 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
2372 break; 2374 break;
2373 if (xhci->quirks & XHCI_TRUST_TX_LENGTH) 2375 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2374 trb_comp_code = COMP_SHORT_TX; 2376 trb_comp_code = COMP_SHORT_TX;
@@ -2461,14 +2463,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2461 * TD list. 2463 * TD list.
2462 */ 2464 */
2463 if (list_empty(&ep_ring->td_list)) { 2465 if (list_empty(&ep_ring->td_list)) {
2464 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d " 2466 /*
2465 "with no TDs queued?\n", 2467 * A stopped endpoint may generate an extra completion
2466 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2468 * event if the device was suspended. Don't print
2467 ep_index); 2469 * warnings.
2468 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", 2470 */
2469 (le32_to_cpu(event->flags) & 2471 if (!(trb_comp_code == COMP_STOP ||
2470 TRB_TYPE_BITMASK)>>10); 2472 trb_comp_code == COMP_STOP_INVAL)) {
2471 xhci_print_trb_offsets(xhci, (union xhci_trb *) event); 2473 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2474 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2475 ep_index);
2476 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2477 (le32_to_cpu(event->flags) &
2478 TRB_TYPE_BITMASK)>>10);
2479 xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2480 }
2472 if (ep->skip) { 2481 if (ep->skip) {
2473 ep->skip = false; 2482 ep->skip = false;
2474 xhci_dbg(xhci, "td_list is empty while skip " 2483 xhci_dbg(xhci, "td_list is empty while skip "
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 849470b18831..53b8f89a0b1c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3779,6 +3779,28 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3779 return 0; 3779 return 0;
3780} 3780}
3781 3781
3782/*
3783 * Translate the roothub port index into the real index in the HW
3784 * port status registers: calculate the offset between the port's
3785 * PORTSC register and the port status base, then divide by the
3786 * number of per-port registers. The raw port number is 1-based.
3787 */
3788int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
3789{
3790 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3791 __le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
3792 __le32 __iomem *addr;
3793 int raw_port;
3794
3795 if (hcd->speed != HCD_USB3)
3796 addr = xhci->usb2_ports[port1 - 1];
3797 else
3798 addr = xhci->usb3_ports[port1 - 1];
3799
3800 raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
3801 return raw_port;
3802}
3803
3782#ifdef CONFIG_USB_SUSPEND 3804#ifdef CONFIG_USB_SUSPEND
3783 3805
3784/* BESL to HIRD Encoding array for USB2 LPM */ 3806/* BESL to HIRD Encoding array for USB2 LPM */
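
xhci_find_raw_port_number() above recovers the raw (1-based) port number by pointer arithmetic: the distance between the port's first register and the port-register base, divided by the number of registers per port. A small userspace sketch of that calculation; the register block is a made-up stand-in and NUM_PORT_REGS is assumed here to be 4 (PORTSC plus three companion registers per port):

#include <stdio.h>

#define NUM_PORT_REGS 4                             /* assumed per-port register count */

int main(void)
{
	unsigned int regs[3 * NUM_PORT_REGS];       /* fake register block for three ports */
	unsigned int *base = regs;                  /* plays &op_regs->port_status_base    */
	unsigned int *port_sc = &regs[2 * NUM_PORT_REGS];   /* PORTSC of the third port    */
	int raw_port;

	raw_port = (int)(port_sc - base) / NUM_PORT_REGS + 1;
	printf("raw port number: %d\n", raw_port);  /* prints 3 */
	return 0;
}
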
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 2c510e4a7d4c..63582719e0fb 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -972,6 +972,10 @@ struct xhci_transfer_event {
972 __le32 flags; 972 __le32 flags;
973}; 973};
974 974
975/* Transfer event TRB length bit mask */
976/* bits 0:23 */
977#define EVENT_TRB_LEN(p) ((p) & 0xffffff)
978
975/** Transfer Event bit fields **/ 979/** Transfer Event bit fields **/
976#define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f) 980#define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f)
977 981
@@ -1829,6 +1833,7 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
1829int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, 1833int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
1830 char *buf, u16 wLength); 1834 char *buf, u16 wLength);
1831int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); 1835int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
1836int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
1832 1837
1833#ifdef CONFIG_PM 1838#ifdef CONFIG_PM
1834int xhci_bus_suspend(struct usb_hcd *hcd); 1839int xhci_bus_suspend(struct usb_hcd *hcd);
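
The new EVENT_TRB_LEN() macro masks bits 0:23 of the transfer event's length field. A trivial check of that mask, using the macro exactly as defined in the hunk above (the sample value is arbitrary):

#include <assert.h>
#include <stdint.h>

#define EVENT_TRB_LEN(p)	((p) & 0xffffff)    /* bits 0:23, as in the hunk above */

int main(void)
{
	uint32_t transfer_len = 0xAB001234u;        /* bits 24:31 carry other event fields */

	assert(EVENT_TRB_LEN(transfer_len) == 0x001234);
	return 0;
}
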
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 65217a590068..90549382eba5 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -38,6 +38,7 @@ config USB_ISP1301
38 tristate "NXP ISP1301 USB transceiver support" 38 tristate "NXP ISP1301 USB transceiver support"
39 depends on USB || USB_GADGET 39 depends on USB || USB_GADGET
40 depends on I2C 40 depends on I2C
41 select USB_OTG_UTILS
41 help 42 help
42 Say Y here to add support for the NXP ISP1301 USB transceiver driver. 43 Say Y here to add support for the NXP ISP1301 USB transceiver driver.
43 This chip is typically used as USB transceiver for USB host, gadget 44 This chip is typically used as USB transceiver for USB host, gadget
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index d4809d551473..9886180e45f1 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -640,6 +640,7 @@ static struct usb_device_id id_table_combined [] = {
640 { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, 640 { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
641 { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) }, 641 { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
642 { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, 642 { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
643 { USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) },
643 { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, 644 { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
644 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, 645 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
645 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, 646 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 9d359e189a64..e79861eeed4c 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -584,6 +584,13 @@
584#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ 584#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
585 585
586/* 586/*
587 * Mitsubishi Electric Corp. (http://www.meau.com)
588 * Submitted by Konstantin Holoborodko
589 */
590#define MITSUBISHI_VID 0x06D3
591#define MITSUBISHI_FXUSB_PID 0x0284 /* USB/RS422 converters: FX-USB-AW/-BD */
592
593/*
587 * Definitions for B&B Electronics products. 594 * Definitions for B&B Electronics products.
588 */ 595 */
589#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */ 596#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 2e70efa08b77..5d9b178484fd 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -903,6 +903,7 @@ static int usb_serial_probe(struct usb_interface *interface,
903 port->port.ops = &serial_port_ops; 903 port->port.ops = &serial_port_ops;
904 port->serial = serial; 904 port->serial = serial;
905 spin_lock_init(&port->lock); 905 spin_lock_init(&port->lock);
906 init_waitqueue_head(&port->delta_msr_wait);
906 /* Keep this for private driver use for the moment but 907 /* Keep this for private driver use for the moment but
907 should probably go away */ 908 should probably go away */
908 INIT_WORK(&port->work, usb_serial_port_work); 909 INIT_WORK(&port->work, usb_serial_port_work);
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 8189cb6a86af..7abc5c81af2c 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -346,6 +346,7 @@ static long vfio_pci_ioctl(void *device_data,
346 346
347 if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { 347 if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
348 size_t size; 348 size_t size;
349 int max = vfio_pci_get_irq_count(vdev, hdr.index);
349 350
350 if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) 351 if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
351 size = sizeof(uint8_t); 352 size = sizeof(uint8_t);
@@ -355,7 +356,7 @@ static long vfio_pci_ioctl(void *device_data,
355 return -EINVAL; 356 return -EINVAL;
356 357
357 if (hdr.argsz - minsz < hdr.count * size || 358 if (hdr.argsz - minsz < hdr.count * size ||
358 hdr.count > vfio_pci_get_irq_count(vdev, hdr.index)) 359 hdr.start >= max || hdr.start + hdr.count > max)
359 return -EINVAL; 360 return -EINVAL;
360 361
361 data = memdup_user((void __user *)(arg + minsz), 362 data = memdup_user((void __user *)(arg + minsz),
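
The vfio change above tightens the ioctl validation: instead of only comparing hdr.count against the number of IRQs, it rejects any (start, count) window that begins at or past max or runs past the end. A small sketch of the same check; irq_range_ok is an illustrative name, and as in the original the sum start + count is assumed not to overflow:

#include <stdbool.h>
#include <stdio.h>

/* Reject a window that starts at or past max, or that runs past the end. */
static bool irq_range_ok(unsigned int start, unsigned int count, unsigned int max)
{
	return !(start >= max || start + count > max);
}

int main(void)
{
	printf("%d\n", irq_range_ok(0, 4, 4));      /* 1: covers the whole range */
	printf("%d\n", irq_range_ok(3, 2, 4));      /* 0: runs past the end      */
	printf("%d\n", irq_range_ok(4, 1, 4));      /* 0: starts out of range    */
	return 0;
}
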
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 43fb11ee2e8d..957a0b98a5d9 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -60,14 +60,22 @@ enum {
60 VHOST_SCSI_VQ_IO = 2, 60 VHOST_SCSI_VQ_IO = 2,
61}; 61};
62 62
63/*
 64 * VIRTIO_RING_F_EVENT_IDX seems broken. It is not clear whether the
 65 * bug is in the kernel, but disabling the feature helps.
66 * TODO: debug and remove the workaround.
67 */
68enum {
69 VHOST_SCSI_FEATURES = VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)
70};
71
63#define VHOST_SCSI_MAX_TARGET 256 72#define VHOST_SCSI_MAX_TARGET 256
64#define VHOST_SCSI_MAX_VQ 128 73#define VHOST_SCSI_MAX_VQ 128
65 74
66struct vhost_scsi { 75struct vhost_scsi {
67 /* Protected by vhost_scsi->dev.mutex */ 76 /* Protected by vhost_scsi->dev.mutex */
68 struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET]; 77 struct tcm_vhost_tpg **vs_tpg;
69 char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; 78 char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
70 bool vs_endpoint;
71 79
72 struct vhost_dev dev; 80 struct vhost_dev dev;
73 struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ]; 81 struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
@@ -570,9 +578,27 @@ static void tcm_vhost_submission_work(struct work_struct *work)
570 } 578 }
571} 579}
572 580
581static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
582 struct vhost_virtqueue *vq, int head, unsigned out)
583{
584 struct virtio_scsi_cmd_resp __user *resp;
585 struct virtio_scsi_cmd_resp rsp;
586 int ret;
587
588 memset(&rsp, 0, sizeof(rsp));
589 rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
590 resp = vq->iov[out].iov_base;
591 ret = __copy_to_user(resp, &rsp, sizeof(rsp));
592 if (!ret)
593 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
594 else
595 pr_err("Faulted on virtio_scsi_cmd_resp\n");
596}
597
573static void vhost_scsi_handle_vq(struct vhost_scsi *vs, 598static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
574 struct vhost_virtqueue *vq) 599 struct vhost_virtqueue *vq)
575{ 600{
601 struct tcm_vhost_tpg **vs_tpg;
576 struct virtio_scsi_cmd_req v_req; 602 struct virtio_scsi_cmd_req v_req;
577 struct tcm_vhost_tpg *tv_tpg; 603 struct tcm_vhost_tpg *tv_tpg;
578 struct tcm_vhost_cmd *tv_cmd; 604 struct tcm_vhost_cmd *tv_cmd;
@@ -581,8 +607,16 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
581 int head, ret; 607 int head, ret;
582 u8 target; 608 u8 target;
583 609
584 /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ 610 /*
585 if (unlikely(!vs->vs_endpoint)) 611 * We can handle the vq only after the endpoint is setup by calling the
612 * VHOST_SCSI_SET_ENDPOINT ioctl.
613 *
 614 * TODO: Check that we are running from vhost_worker, which acts
 615 * as the read-side critical section for vhost's flavor of RCU.
616 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
617 */
618 vs_tpg = rcu_dereference_check(vq->private_data, 1);
619 if (!vs_tpg)
586 return; 620 return;
587 621
588 mutex_lock(&vq->mutex); 622 mutex_lock(&vq->mutex);
@@ -652,23 +686,11 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
652 686
653 /* Extract the tpgt */ 687 /* Extract the tpgt */
654 target = v_req.lun[1]; 688 target = v_req.lun[1];
655 tv_tpg = vs->vs_tpg[target]; 689 tv_tpg = ACCESS_ONCE(vs_tpg[target]);
656 690
657 /* Target does not exist, fail the request */ 691 /* Target does not exist, fail the request */
658 if (unlikely(!tv_tpg)) { 692 if (unlikely(!tv_tpg)) {
659 struct virtio_scsi_cmd_resp __user *resp; 693 vhost_scsi_send_bad_target(vs, vq, head, out);
660 struct virtio_scsi_cmd_resp rsp;
661
662 memset(&rsp, 0, sizeof(rsp));
663 rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
664 resp = vq->iov[out].iov_base;
665 ret = __copy_to_user(resp, &rsp, sizeof(rsp));
666 if (!ret)
667 vhost_add_used_and_signal(&vs->dev,
668 vq, head, 0);
669 else
670 pr_err("Faulted on virtio_scsi_cmd_resp\n");
671
672 continue; 694 continue;
673 } 695 }
674 696
@@ -681,22 +703,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
681 if (IS_ERR(tv_cmd)) { 703 if (IS_ERR(tv_cmd)) {
682 vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", 704 vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
683 PTR_ERR(tv_cmd)); 705 PTR_ERR(tv_cmd));
684 break; 706 goto err_cmd;
685 } 707 }
686 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" 708 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
687 ": %d\n", tv_cmd, exp_data_len, data_direction); 709 ": %d\n", tv_cmd, exp_data_len, data_direction);
688 710
689 tv_cmd->tvc_vhost = vs; 711 tv_cmd->tvc_vhost = vs;
690 tv_cmd->tvc_vq = vq; 712 tv_cmd->tvc_vq = vq;
691
692 if (unlikely(vq->iov[out].iov_len !=
693 sizeof(struct virtio_scsi_cmd_resp))) {
694 vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
695 " bytes, out: %d, in: %d\n",
696 vq->iov[out].iov_len, out, in);
697 break;
698 }
699
700 tv_cmd->tvc_resp = vq->iov[out].iov_base; 713 tv_cmd->tvc_resp = vq->iov[out].iov_base;
701 714
702 /* 715 /*
@@ -716,7 +729,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
716 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 729 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
717 scsi_command_size(tv_cmd->tvc_cdb), 730 scsi_command_size(tv_cmd->tvc_cdb),
718 TCM_VHOST_MAX_CDB_SIZE); 731 TCM_VHOST_MAX_CDB_SIZE);
719 break; /* TODO */ 732 goto err_free;
720 } 733 }
721 tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; 734 tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
722 735
@@ -729,7 +742,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
729 data_direction == DMA_TO_DEVICE); 742 data_direction == DMA_TO_DEVICE);
730 if (unlikely(ret)) { 743 if (unlikely(ret)) {
731 vq_err(vq, "Failed to map iov to sgl\n"); 744 vq_err(vq, "Failed to map iov to sgl\n");
732 break; /* TODO */ 745 goto err_free;
733 } 746 }
734 } 747 }
735 748
@@ -750,6 +763,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
750 } 763 }
751 764
752 mutex_unlock(&vq->mutex); 765 mutex_unlock(&vq->mutex);
766 return;
767
768err_free:
769 vhost_scsi_free_cmd(tv_cmd);
770err_cmd:
771 vhost_scsi_send_bad_target(vs, vq, head, out);
772 mutex_unlock(&vq->mutex);
753} 773}
754 774
755static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) 775static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
@@ -771,6 +791,20 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
771 vhost_scsi_handle_vq(vs, vq); 791 vhost_scsi_handle_vq(vs, vq);
772} 792}
773 793
794static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
795{
796 vhost_poll_flush(&vs->dev.vqs[index].poll);
797}
798
799static void vhost_scsi_flush(struct vhost_scsi *vs)
800{
801 int i;
802
803 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
804 vhost_scsi_flush_vq(vs, i);
805 vhost_work_flush(&vs->dev, &vs->vs_completion_work);
806}
807
774/* 808/*
775 * Called from vhost_scsi_ioctl() context to walk the list of available 809 * Called from vhost_scsi_ioctl() context to walk the list of available
776 * tcm_vhost_tpg with an active struct tcm_vhost_nexus 810 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
@@ -781,8 +815,10 @@ static int vhost_scsi_set_endpoint(
781{ 815{
782 struct tcm_vhost_tport *tv_tport; 816 struct tcm_vhost_tport *tv_tport;
783 struct tcm_vhost_tpg *tv_tpg; 817 struct tcm_vhost_tpg *tv_tpg;
818 struct tcm_vhost_tpg **vs_tpg;
819 struct vhost_virtqueue *vq;
820 int index, ret, i, len;
784 bool match = false; 821 bool match = false;
785 int index, ret;
786 822
787 mutex_lock(&vs->dev.mutex); 823 mutex_lock(&vs->dev.mutex);
788 /* Verify that ring has been setup correctly. */ 824 /* Verify that ring has been setup correctly. */
@@ -794,6 +830,15 @@ static int vhost_scsi_set_endpoint(
794 } 830 }
795 } 831 }
796 832
833 len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
834 vs_tpg = kzalloc(len, GFP_KERNEL);
835 if (!vs_tpg) {
836 mutex_unlock(&vs->dev.mutex);
837 return -ENOMEM;
838 }
839 if (vs->vs_tpg)
840 memcpy(vs_tpg, vs->vs_tpg, len);
841
797 mutex_lock(&tcm_vhost_mutex); 842 mutex_lock(&tcm_vhost_mutex);
798 list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { 843 list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
799 mutex_lock(&tv_tpg->tv_tpg_mutex); 844 mutex_lock(&tv_tpg->tv_tpg_mutex);
@@ -808,14 +853,15 @@ static int vhost_scsi_set_endpoint(
808 tv_tport = tv_tpg->tport; 853 tv_tport = tv_tpg->tport;
809 854
810 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { 855 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
811 if (vs->vs_tpg[tv_tpg->tport_tpgt]) { 856 if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
812 mutex_unlock(&tv_tpg->tv_tpg_mutex); 857 mutex_unlock(&tv_tpg->tv_tpg_mutex);
813 mutex_unlock(&tcm_vhost_mutex); 858 mutex_unlock(&tcm_vhost_mutex);
814 mutex_unlock(&vs->dev.mutex); 859 mutex_unlock(&vs->dev.mutex);
860 kfree(vs_tpg);
815 return -EEXIST; 861 return -EEXIST;
816 } 862 }
817 tv_tpg->tv_tpg_vhost_count++; 863 tv_tpg->tv_tpg_vhost_count++;
818 vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; 864 vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
819 smp_mb__after_atomic_inc(); 865 smp_mb__after_atomic_inc();
820 match = true; 866 match = true;
821 } 867 }
@@ -826,12 +872,27 @@ static int vhost_scsi_set_endpoint(
826 if (match) { 872 if (match) {
827 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, 873 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
828 sizeof(vs->vs_vhost_wwpn)); 874 sizeof(vs->vs_vhost_wwpn));
829 vs->vs_endpoint = true; 875 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
876 vq = &vs->vqs[i];
877 /* Flushing the vhost_work acts as synchronize_rcu */
878 mutex_lock(&vq->mutex);
879 rcu_assign_pointer(vq->private_data, vs_tpg);
880 vhost_init_used(vq);
881 mutex_unlock(&vq->mutex);
882 }
830 ret = 0; 883 ret = 0;
831 } else { 884 } else {
832 ret = -EEXIST; 885 ret = -EEXIST;
833 } 886 }
834 887
888 /*
889 * Act as synchronize_rcu to make sure access to
890 * old vs->vs_tpg is finished.
891 */
892 vhost_scsi_flush(vs);
893 kfree(vs->vs_tpg);
894 vs->vs_tpg = vs_tpg;
895
835 mutex_unlock(&vs->dev.mutex); 896 mutex_unlock(&vs->dev.mutex);
836 return ret; 897 return ret;
837} 898}
@@ -842,6 +903,8 @@ static int vhost_scsi_clear_endpoint(
842{ 903{
843 struct tcm_vhost_tport *tv_tport; 904 struct tcm_vhost_tport *tv_tport;
844 struct tcm_vhost_tpg *tv_tpg; 905 struct tcm_vhost_tpg *tv_tpg;
906 struct vhost_virtqueue *vq;
907 bool match = false;
845 int index, ret, i; 908 int index, ret, i;
846 u8 target; 909 u8 target;
847 910
@@ -853,9 +916,14 @@ static int vhost_scsi_clear_endpoint(
853 goto err_dev; 916 goto err_dev;
854 } 917 }
855 } 918 }
919
920 if (!vs->vs_tpg) {
921 mutex_unlock(&vs->dev.mutex);
922 return 0;
923 }
924
856 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { 925 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
857 target = i; 926 target = i;
858
859 tv_tpg = vs->vs_tpg[target]; 927 tv_tpg = vs->vs_tpg[target];
860 if (!tv_tpg) 928 if (!tv_tpg)
861 continue; 929 continue;
@@ -877,10 +945,27 @@ static int vhost_scsi_clear_endpoint(
877 } 945 }
878 tv_tpg->tv_tpg_vhost_count--; 946 tv_tpg->tv_tpg_vhost_count--;
879 vs->vs_tpg[target] = NULL; 947 vs->vs_tpg[target] = NULL;
880 vs->vs_endpoint = false; 948 match = true;
881 mutex_unlock(&tv_tpg->tv_tpg_mutex); 949 mutex_unlock(&tv_tpg->tv_tpg_mutex);
882 } 950 }
951 if (match) {
952 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
953 vq = &vs->vqs[i];
954 /* Flushing the vhost_work acts as synchronize_rcu */
955 mutex_lock(&vq->mutex);
956 rcu_assign_pointer(vq->private_data, NULL);
957 mutex_unlock(&vq->mutex);
958 }
959 }
960 /*
961 * Act as synchronize_rcu to make sure access to
962 * old vs->vs_tpg is finished.
963 */
964 vhost_scsi_flush(vs);
965 kfree(vs->vs_tpg);
966 vs->vs_tpg = NULL;
883 mutex_unlock(&vs->dev.mutex); 967 mutex_unlock(&vs->dev.mutex);
968
884 return 0; 969 return 0;
885 970
886err_tpg: 971err_tpg:
@@ -890,6 +975,24 @@ err_dev:
890 return ret; 975 return ret;
891} 976}
892 977
978static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
979{
980 if (features & ~VHOST_SCSI_FEATURES)
981 return -EOPNOTSUPP;
982
983 mutex_lock(&vs->dev.mutex);
984 if ((features & (1 << VHOST_F_LOG_ALL)) &&
985 !vhost_log_access_ok(&vs->dev)) {
986 mutex_unlock(&vs->dev.mutex);
987 return -EFAULT;
988 }
989 vs->dev.acked_features = features;
990 smp_wmb();
991 vhost_scsi_flush(vs);
992 mutex_unlock(&vs->dev.mutex);
993 return 0;
994}
995
893static int vhost_scsi_open(struct inode *inode, struct file *f) 996static int vhost_scsi_open(struct inode *inode, struct file *f)
894{ 997{
895 struct vhost_scsi *s; 998 struct vhost_scsi *s;
@@ -930,38 +1033,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
930 return 0; 1033 return 0;
931} 1034}
932 1035
933static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
934{
935 vhost_poll_flush(&vs->dev.vqs[index].poll);
936}
937
938static void vhost_scsi_flush(struct vhost_scsi *vs)
939{
940 int i;
941
942 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
943 vhost_scsi_flush_vq(vs, i);
944 vhost_work_flush(&vs->dev, &vs->vs_completion_work);
945}
946
947static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
948{
949 if (features & ~VHOST_FEATURES)
950 return -EOPNOTSUPP;
951
952 mutex_lock(&vs->dev.mutex);
953 if ((features & (1 << VHOST_F_LOG_ALL)) &&
954 !vhost_log_access_ok(&vs->dev)) {
955 mutex_unlock(&vs->dev.mutex);
956 return -EFAULT;
957 }
958 vs->dev.acked_features = features;
959 smp_wmb();
960 vhost_scsi_flush(vs);
961 mutex_unlock(&vs->dev.mutex);
962 return 0;
963}
964
965static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, 1036static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
966 unsigned long arg) 1037 unsigned long arg)
967{ 1038{
@@ -992,7 +1063,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
992 return -EFAULT; 1063 return -EFAULT;
993 return 0; 1064 return 0;
994 case VHOST_GET_FEATURES: 1065 case VHOST_GET_FEATURES:
995 features = VHOST_FEATURES; 1066 features = VHOST_SCSI_FEATURES;
996 if (copy_to_user(featurep, &features, sizeof features)) 1067 if (copy_to_user(featurep, &features, sizeof features))
997 return -EFAULT; 1068 return -EFAULT;
998 return 0; 1069 return 0;
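
The tcm_vhost changes above switch vs_tpg to an RCU-style scheme: a new table is built off to the side, published into vq->private_data with rcu_assign_pointer(), and the old table is freed only after vhost_scsi_flush() has acted as synchronize_rcu(); readers use rcu_dereference_check() and ACCESS_ONCE(). A rough userspace analogue of the writer side only, with C11 atomics as stand-ins (struct tpg, publish_new_table, wait_for_readers are illustrative; the real reader-side guarantee comes from flushing the vhost work queue, which is only stubbed here):

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

#define MAX_TARGETS 256

struct tpg { int id; };                             /* stand-in for struct tcm_vhost_tpg */

static _Atomic(struct tpg **) live_tpg;             /* plays the role of vq->private_data */

static void wait_for_readers(void)
{
	/* The driver flushes the vhost work queue here (vhost_scsi_flush()). */
}

static int publish_new_table(struct tpg *entry, int target)
{
	struct tpg **old = atomic_load(&live_tpg);
	struct tpg **new_tbl = calloc(MAX_TARGETS, sizeof(*new_tbl));

	if (!new_tbl)
		return -1;
	if (old)
		memcpy(new_tbl, old, MAX_TARGETS * sizeof(*new_tbl));
	new_tbl[target] = entry;

	/* One release store publishes the table, like rcu_assign_pointer(). */
	atomic_store_explicit(&live_tpg, new_tbl, memory_order_release);
	wait_for_readers();                         /* stands in for synchronize_rcu() */
	free(old);
	return 0;
}

int main(void)
{
	struct tpg t = { .id = 1 };

	return publish_new_table(&t, 0) ? 1 : 0;
}
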
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 94ad0f71383c..7f6709991a5c 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -1400,7 +1400,7 @@ int fb_videomode_from_videomode(const struct videomode *vm,
1400 fbmode->vmode = 0; 1400 fbmode->vmode = 0;
1401 if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) 1401 if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)
1402 fbmode->sync |= FB_SYNC_HOR_HIGH_ACT; 1402 fbmode->sync |= FB_SYNC_HOR_HIGH_ACT;
1403 if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) 1403 if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH)
1404 fbmode->sync |= FB_SYNC_VERT_HIGH_ACT; 1404 fbmode->sync |= FB_SYNC_VERT_HIGH_ACT;
1405 if (vm->data_flags & DISPLAY_FLAGS_INTERLACED) 1405 if (vm->data_flags & DISPLAY_FLAGS_INTERLACED)
1406 fbmode->vmode |= FB_VMODE_INTERLACED; 1406 fbmode->vmode |= FB_VMODE_INTERLACED;
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 755556ca5b2d..45169cbaba6e 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -169,6 +169,7 @@ struct mxsfb_info {
169 unsigned dotclk_delay; 169 unsigned dotclk_delay;
170 const struct mxsfb_devdata *devdata; 170 const struct mxsfb_devdata *devdata;
171 int mapped; 171 int mapped;
172 u32 sync;
172}; 173};
173 174
174#define mxsfb_is_v3(host) (host->devdata->ipversion == 3) 175#define mxsfb_is_v3(host) (host->devdata->ipversion == 3)
@@ -456,9 +457,9 @@ static int mxsfb_set_par(struct fb_info *fb_info)
456 vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH; 457 vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
457 if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT) 458 if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT)
458 vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH; 459 vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
459 if (fb_info->var.sync & FB_SYNC_DATA_ENABLE_HIGH_ACT) 460 if (host->sync & MXSFB_SYNC_DATA_ENABLE_HIGH_ACT)
460 vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH; 461 vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
461 if (fb_info->var.sync & FB_SYNC_DOTCLK_FAILING_ACT) 462 if (host->sync & MXSFB_SYNC_DOTCLK_FAILING_ACT)
462 vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING; 463 vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING;
463 464
464 writel(vdctrl0, host->base + LCDC_VDCTRL0); 465 writel(vdctrl0, host->base + LCDC_VDCTRL0);
@@ -861,6 +862,8 @@ static int mxsfb_probe(struct platform_device *pdev)
861 862
862 INIT_LIST_HEAD(&fb_info->modelist); 863 INIT_LIST_HEAD(&fb_info->modelist);
863 864
865 host->sync = pdata->sync;
866
864 ret = mxsfb_init_fbinfo(host); 867 ret = mxsfb_init_fbinfo(host);
865 if (ret != 0) 868 if (ret != 0)
866 goto error_init_fb; 869 goto error_init_fb;
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index e31f5b33b501..d40612c31a98 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -32,6 +32,8 @@
32 32
33#include <linux/omap-dma.h> 33#include <linux/omap-dma.h>
34 34
35#include <mach/hardware.h>
36
35#include "omapfb.h" 37#include "omapfb.h"
36#include "lcdc.h" 38#include "lcdc.h"
37 39
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index 6b6643911d29..048c98381ef6 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -63,6 +63,9 @@ struct tpo_td043_device {
63 u32 power_on_resume:1; 63 u32 power_on_resume:1;
64}; 64};
65 65
66/* used to pass spi_device from SPI to DSS portion of the driver */
67static struct tpo_td043_device *g_tpo_td043;
68
66static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data) 69static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data)
67{ 70{
68 struct spi_message m; 71 struct spi_message m;
@@ -403,7 +406,7 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
403 406
404static int tpo_td043_probe(struct omap_dss_device *dssdev) 407static int tpo_td043_probe(struct omap_dss_device *dssdev)
405{ 408{
406 struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev); 409 struct tpo_td043_device *tpo_td043 = g_tpo_td043;
407 int nreset_gpio = dssdev->reset_gpio; 410 int nreset_gpio = dssdev->reset_gpio;
408 int ret = 0; 411 int ret = 0;
409 412
@@ -440,6 +443,8 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev)
440 if (ret) 443 if (ret)
441 dev_warn(&dssdev->dev, "failed to create sysfs files\n"); 444 dev_warn(&dssdev->dev, "failed to create sysfs files\n");
442 445
446 dev_set_drvdata(&dssdev->dev, tpo_td043);
447
443 return 0; 448 return 0;
444 449
445fail_gpio_req: 450fail_gpio_req:
@@ -505,6 +510,9 @@ static int tpo_td043_spi_probe(struct spi_device *spi)
505 return -ENODEV; 510 return -ENODEV;
506 } 511 }
507 512
513 if (g_tpo_td043 != NULL)
514 return -EBUSY;
515
508 spi->bits_per_word = 16; 516 spi->bits_per_word = 16;
509 spi->mode = SPI_MODE_0; 517 spi->mode = SPI_MODE_0;
510 518
@@ -521,7 +529,7 @@ static int tpo_td043_spi_probe(struct spi_device *spi)
521 tpo_td043->spi = spi; 529 tpo_td043->spi = spi;
522 tpo_td043->nreset_gpio = dssdev->reset_gpio; 530 tpo_td043->nreset_gpio = dssdev->reset_gpio;
523 dev_set_drvdata(&spi->dev, tpo_td043); 531 dev_set_drvdata(&spi->dev, tpo_td043);
524 dev_set_drvdata(&dssdev->dev, tpo_td043); 532 g_tpo_td043 = tpo_td043;
525 533
526 omap_dss_register_driver(&tpo_td043_driver); 534 omap_dss_register_driver(&tpo_td043_driver);
527 535
@@ -534,6 +542,7 @@ static int tpo_td043_spi_remove(struct spi_device *spi)
534 542
535 omap_dss_unregister_driver(&tpo_td043_driver); 543 omap_dss_unregister_driver(&tpo_td043_driver);
536 kfree(tpo_td043); 544 kfree(tpo_td043);
545 g_tpo_td043 = NULL;
537 546
538 return 0; 547 return 0;
539} 548}
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index d7d66ef5cb58..7f791aeda4d2 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -202,12 +202,10 @@ static const enum omap_dss_output_id omap3630_dss_supported_outputs[] = {
202 202
203static const enum omap_dss_output_id omap4_dss_supported_outputs[] = { 203static const enum omap_dss_output_id omap4_dss_supported_outputs[] = {
204 /* OMAP_DSS_CHANNEL_LCD */ 204 /* OMAP_DSS_CHANNEL_LCD */
205 OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI | 205 OMAP_DSS_OUTPUT_DBI | OMAP_DSS_OUTPUT_DSI1,
206 OMAP_DSS_OUTPUT_DSI1,
207 206
208 /* OMAP_DSS_CHANNEL_DIGIT */ 207 /* OMAP_DSS_CHANNEL_DIGIT */
209 OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI | 208 OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI,
210 OMAP_DSS_OUTPUT_DPI,
211 209
212 /* OMAP_DSS_CHANNEL_LCD2 */ 210 /* OMAP_DSS_CHANNEL_LCD2 */
213 OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI | 211 OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 63203acef812..0264704a52be 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -858,6 +858,7 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch)
858 tmp = ((mode->xres & 7) << 24) | ((display_h_total & 7) << 16) 858 tmp = ((mode->xres & 7) << 24) | ((display_h_total & 7) << 16)
859 | ((mode->hsync_len & 7) << 8) | (hsync_pos & 7); 859 | ((mode->hsync_len & 7) << 8) | (hsync_pos & 7);
860 lcdc_write_chan(ch, LDHAJR, tmp); 860 lcdc_write_chan(ch, LDHAJR, tmp);
861 lcdc_write_chan_mirror(ch, LDHAJR, tmp);
861} 862}
862 863
863static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl) 864static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl)
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index b75db0186488..d4284458377e 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -1973,7 +1973,8 @@ static int uvesafb_init(void)
1973 err = -ENOMEM; 1973 err = -ENOMEM;
1974 1974
1975 if (err) { 1975 if (err) {
1976 platform_device_put(uvesafb_device); 1976 if (uvesafb_device)
1977 platform_device_put(uvesafb_device);
1977 platform_driver_unregister(&uvesafb_driver); 1978 platform_driver_unregister(&uvesafb_driver);
1978 cn_del_callback(&uvesafb_cn_id); 1979 cn_del_callback(&uvesafb_cn_id);
1979 return err; 1980 return err;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 9fcc70c11cea..e89fc3133972 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -117,7 +117,7 @@ config ARM_SP805_WATCHDOG
117 117
118config AT91RM9200_WATCHDOG 118config AT91RM9200_WATCHDOG
119 tristate "AT91RM9200 watchdog" 119 tristate "AT91RM9200 watchdog"
120 depends on ARCH_AT91 120 depends on ARCH_AT91RM9200
121 help 121 help
122 Watchdog timer embedded into AT91RM9200 chips. This will reboot your 122 Watchdog timer embedded into AT91RM9200 chips. This will reboot your
123 system when the timeout is reached. 123 system when the timeout is reached.
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 5a32232cf7c1..67af155cf602 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -182,7 +182,7 @@ config XEN_PRIVCMD
182 182
183config XEN_STUB 183config XEN_STUB
184 bool "Xen stub drivers" 184 bool "Xen stub drivers"
185 depends on XEN && X86_64 185 depends on XEN && X86_64 && BROKEN
186 default n 186 default n
187 help 187 help
188 Allow kernel to install stub drivers, to reserve space for Xen drivers, 188 Allow kernel to install stub drivers, to reserve space for Xen drivers,
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index d17aa41a9041..2647ad8e1f19 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -403,11 +403,23 @@ static void unmask_evtchn(int port)
403 403
404 if (unlikely((cpu != cpu_from_evtchn(port)))) 404 if (unlikely((cpu != cpu_from_evtchn(port))))
405 do_hypercall = 1; 405 do_hypercall = 1;
406 else 406 else {
407 /*
408 * Need to clear the mask before checking pending to
409 * avoid a race with an event becoming pending.
410 *
411 * EVTCHNOP_unmask will only trigger an upcall if the
 412 * mask bit was set, so if a hypercall is needed,
 413 * remask the event.
414 */
415 sync_clear_bit(port, BM(&s->evtchn_mask[0]));
407 evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0])); 416 evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
408 417
409 if (unlikely(evtchn_pending && xen_hvm_domain())) 418 if (unlikely(evtchn_pending && xen_hvm_domain())) {
410 do_hypercall = 1; 419 sync_set_bit(port, BM(&s->evtchn_mask[0]));
420 do_hypercall = 1;
421 }
422 }
411 423
412 /* Slow path (hypercall) if this is a non-local port or if this is 424 /* Slow path (hypercall) if this is a non-local port or if this is
413 * an hvm domain and an event is pending (hvm domains don't have 425 * an hvm domain and an event is pending (hvm domains don't have
@@ -418,8 +430,6 @@ static void unmask_evtchn(int port)
418 } else { 430 } else {
419 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); 431 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
420 432
421 sync_clear_bit(port, BM(&s->evtchn_mask[0]));
422
423 /* 433 /*
424 * The following is basically the equivalent of 434 * The following is basically the equivalent of
425 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose 435 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
@@ -1306,7 +1316,7 @@ static void __xen_evtchn_do_upcall(void)
1306{ 1316{
1307 int start_word_idx, start_bit_idx; 1317 int start_word_idx, start_bit_idx;
1308 int word_idx, bit_idx; 1318 int word_idx, bit_idx;
1309 int i; 1319 int i, irq;
1310 int cpu = get_cpu(); 1320 int cpu = get_cpu();
1311 struct shared_info *s = HYPERVISOR_shared_info; 1321 struct shared_info *s = HYPERVISOR_shared_info;
1312 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); 1322 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
@@ -1314,6 +1324,8 @@ static void __xen_evtchn_do_upcall(void)
1314 1324
1315 do { 1325 do {
1316 xen_ulong_t pending_words; 1326 xen_ulong_t pending_words;
1327 xen_ulong_t pending_bits;
1328 struct irq_desc *desc;
1317 1329
1318 vcpu_info->evtchn_upcall_pending = 0; 1330 vcpu_info->evtchn_upcall_pending = 0;
1319 1331
@@ -1325,6 +1337,17 @@ static void __xen_evtchn_do_upcall(void)
1325 * selector flag. xchg_xen_ulong must contain an 1337 * selector flag. xchg_xen_ulong must contain an
1326 * appropriate barrier. 1338 * appropriate barrier.
1327 */ 1339 */
1340 if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
1341 int evtchn = evtchn_from_irq(irq);
1342 word_idx = evtchn / BITS_PER_LONG;
1343 pending_bits = evtchn % BITS_PER_LONG;
1344 if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
1345 desc = irq_to_desc(irq);
1346 if (desc)
1347 generic_handle_irq_desc(irq, desc);
1348 }
1349 }
1350
1328 pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0); 1351 pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
1329 1352
1330 start_word_idx = __this_cpu_read(current_word_idx); 1353 start_word_idx = __this_cpu_read(current_word_idx);
@@ -1333,7 +1356,6 @@ static void __xen_evtchn_do_upcall(void)
1333 word_idx = start_word_idx; 1356 word_idx = start_word_idx;
1334 1357
1335 for (i = 0; pending_words != 0; i++) { 1358 for (i = 0; pending_words != 0; i++) {
1336 xen_ulong_t pending_bits;
1337 xen_ulong_t words; 1359 xen_ulong_t words;
1338 1360
1339 words = MASK_LSBS(pending_words, word_idx); 1361 words = MASK_LSBS(pending_words, word_idx);
@@ -1362,8 +1384,7 @@ static void __xen_evtchn_do_upcall(void)
1362 1384
1363 do { 1385 do {
1364 xen_ulong_t bits; 1386 xen_ulong_t bits;
1365 int port, irq; 1387 int port;
1366 struct irq_desc *desc;
1367 1388
1368 bits = MASK_LSBS(pending_bits, bit_idx); 1389 bits = MASK_LSBS(pending_bits, bit_idx);
1369 1390
diff --git a/drivers/xen/fallback.c b/drivers/xen/fallback.c
index 0ef7c4d40f86..b04fb64c5a91 100644
--- a/drivers/xen/fallback.c
+++ b/drivers/xen/fallback.c
@@ -44,7 +44,7 @@ int xen_event_channel_op_compat(int cmd, void *arg)
44} 44}
45EXPORT_SYMBOL_GPL(xen_event_channel_op_compat); 45EXPORT_SYMBOL_GPL(xen_event_channel_op_compat);
46 46
47int HYPERVISOR_physdev_op_compat(int cmd, void *arg) 47int xen_physdev_op_compat(int cmd, void *arg)
48{ 48{
49 struct physdev_op op; 49 struct physdev_op op;
50 int rc; 50 int rc;
@@ -78,3 +78,4 @@ int HYPERVISOR_physdev_op_compat(int cmd, void *arg)
78 78
79 return rc; 79 return rc;
80} 80}
81EXPORT_SYMBOL_GPL(xen_physdev_op_compat);
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index f3278a6603ca..90e34ac7e522 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -505,6 +505,9 @@ static int __init xen_acpi_processor_init(void)
505 505
506 pr = per_cpu(processors, i); 506 pr = per_cpu(processors, i);
507 perf = per_cpu_ptr(acpi_perf_data, i); 507 perf = per_cpu_ptr(acpi_perf_data, i);
508 if (!pr)
509 continue;
510
508 pr->performance = perf; 511 pr->performance = perf;
509 rc = acpi_processor_get_performance_info(pr); 512 rc = acpi_processor_get_performance_info(pr);
510 if (rc) 513 if (rc)
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 9204126f1560..a2278ba7fb27 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -17,6 +17,7 @@
 #include <xen/events.h>
 #include <asm/xen/pci.h>
 #include <asm/xen/hypervisor.h>
+#include <xen/interface/physdev.h>
 #include "pciback.h"
 #include "conf_space.h"
 #include "conf_space_quirks.h"
@@ -85,37 +86,52 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
 static void pcistub_device_release(struct kref *kref)
 {
 	struct pcistub_device *psdev;
+	struct pci_dev *dev;
 	struct xen_pcibk_dev_data *dev_data;
 
 	psdev = container_of(kref, struct pcistub_device, kref);
-	dev_data = pci_get_drvdata(psdev->dev);
+	dev = psdev->dev;
+	dev_data = pci_get_drvdata(dev);
 
-	dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
+	dev_dbg(&dev->dev, "pcistub_device_release\n");
 
-	xen_unregister_device_domain_owner(psdev->dev);
+	xen_unregister_device_domain_owner(dev);
 
 	/* Call the reset function which does not take lock as this
 	 * is called from "unbind" which takes a device_lock mutex.
 	 */
-	__pci_reset_function_locked(psdev->dev);
-	if (pci_load_and_free_saved_state(psdev->dev,
-					  &dev_data->pci_saved_state)) {
-		dev_dbg(&psdev->dev->dev, "Could not reload PCI state\n");
-	} else
-		pci_restore_state(psdev->dev);
+	__pci_reset_function_locked(dev);
+	if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
+		dev_dbg(&dev->dev, "Could not reload PCI state\n");
+	else
+		pci_restore_state(dev);
+
+	if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+		struct physdev_pci_device ppdev = {
+			.seg = pci_domain_nr(dev->bus),
+			.bus = dev->bus->number,
+			.devfn = dev->devfn
+		};
+		int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
+						&ppdev);
+
+		if (err)
+			dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
+				 err);
+	}
 
 	/* Disable the device */
-	xen_pcibk_reset_device(psdev->dev);
+	xen_pcibk_reset_device(dev);
 
 	kfree(dev_data);
-	pci_set_drvdata(psdev->dev, NULL);
+	pci_set_drvdata(dev, NULL);
 
 	/* Clean-up the device */
-	xen_pcibk_config_free_dyn_fields(psdev->dev);
-	xen_pcibk_config_free_dev(psdev->dev);
+	xen_pcibk_config_free_dyn_fields(dev);
+	xen_pcibk_config_free_dev(dev);
 
-	psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
-	pci_dev_put(psdev->dev);
+	dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+	pci_dev_put(dev);
 
 	kfree(psdev);
 }
@@ -355,6 +371,19 @@ static int pcistub_init_device(struct pci_dev *dev)
 	if (err)
 		goto config_release;
 
+	if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+		struct physdev_pci_device ppdev = {
+			.seg = pci_domain_nr(dev->bus),
+			.bus = dev->bus->number,
+			.devfn = dev->devfn
+		};
+
+		err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
+		if (err)
+			dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
+				err);
+	}
+
 	/* We need the device active to save the state. */
 	dev_dbg(&dev->dev, "save state of device\n");
 	pci_save_state(dev);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index aea605c98ba6..aae187a7f94a 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -551,6 +551,7 @@ struct block_device *bdgrab(struct block_device *bdev)
 	ihold(bdev->bd_inode);
 	return bdev;
 }
+EXPORT_SYMBOL(bdgrab);
 
 long nr_blockdev_pages(void)
 {
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index ecd25a1b4e51..ca9d8f1a3bb6 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -651,6 +651,8 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
 	if (tree_mod_dont_log(fs_info, NULL))
 		return 0;
 
+	__tree_mod_log_free_eb(fs_info, old_root);
+
 	ret = tree_mod_alloc(fs_info, flags, &tm);
 	if (ret < 0)
 		goto out;
@@ -736,7 +738,7 @@ tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
 static noinline void
 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
 		     struct extent_buffer *src, unsigned long dst_offset,
-		     unsigned long src_offset, int nr_items)
+		     unsigned long src_offset, int nr_items, int log_removal)
 {
 	int ret;
 	int i;
@@ -750,10 +752,12 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
 	}
 
 	for (i = 0; i < nr_items; i++) {
-		ret = tree_mod_log_insert_key_locked(fs_info, src,
-						i + src_offset,
-						MOD_LOG_KEY_REMOVE);
-		BUG_ON(ret < 0);
+		if (log_removal) {
+			ret = tree_mod_log_insert_key_locked(fs_info, src,
+							i + src_offset,
+							MOD_LOG_KEY_REMOVE);
+			BUG_ON(ret < 0);
+		}
 		ret = tree_mod_log_insert_key_locked(fs_info, dst,
 						i + dst_offset,
 						MOD_LOG_KEY_ADD);
@@ -927,7 +931,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
 			BUG_ON(ret); /* -ENOMEM */
 		}
-		tree_mod_log_free_eb(root->fs_info, buf);
 		clean_tree_block(trans, root, buf);
 		*last_ref = 1;
 	}
@@ -1046,6 +1049,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 		btrfs_set_node_ptr_generation(parent, parent_slot,
 					      trans->transid);
 		btrfs_mark_buffer_dirty(parent);
+		tree_mod_log_free_eb(root->fs_info, buf);
 		btrfs_free_tree_block(trans, root, buf, parent_start,
 				      last_ref);
 	}
@@ -1750,7 +1754,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 			goto enospc;
 		}
 
-		tree_mod_log_free_eb(root->fs_info, root->node);
 		tree_mod_log_set_root_pointer(root, child);
 		rcu_assign_pointer(root->node, child);
 
@@ -2995,7 +2998,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
 		push_items = min(src_nritems - 8, push_items);
 
 	tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
-			     push_items);
+			     push_items, 1);
 	copy_extent_buffer(dst, src,
 			   btrfs_node_key_ptr_offset(dst_nritems),
 			   btrfs_node_key_ptr_offset(0),
@@ -3066,7 +3069,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
 			   sizeof(struct btrfs_key_ptr));
 
 	tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
-			     src_nritems - push_items, push_items);
+			     src_nritems - push_items, push_items, 1);
 	copy_extent_buffer(dst, src,
 			   btrfs_node_key_ptr_offset(0),
 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
@@ -3218,12 +3221,18 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 	int mid;
 	int ret;
 	u32 c_nritems;
+	int tree_mod_log_removal = 1;
 
 	c = path->nodes[level];
 	WARN_ON(btrfs_header_generation(c) != trans->transid);
 	if (c == root->node) {
 		/* trying to split the root, lets make a new one */
 		ret = insert_new_root(trans, root, path, level + 1);
+		/*
+		 * removal of root nodes has been logged by
+		 * tree_mod_log_set_root_pointer due to locking
+		 */
+		tree_mod_log_removal = 0;
 		if (ret)
 			return ret;
 	} else {
@@ -3261,7 +3270,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
 			    BTRFS_UUID_SIZE);
 
-	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
+	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid,
+			     tree_mod_log_removal);
 	copy_extent_buffer(split, c,
 			   btrfs_node_key_ptr_offset(0),
 			   btrfs_node_key_ptr_offset(mid),
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 7d84651e850b..6d19a0a554aa 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1291,6 +1291,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 				      0, objectid, NULL, 0, 0, 0);
 	if (IS_ERR(leaf)) {
 		ret = PTR_ERR(leaf);
+		leaf = NULL;
 		goto fail;
 	}
 
@@ -1334,11 +1335,16 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 
 	btrfs_tree_unlock(leaf);
 
+	return root;
+
 fail:
-	if (ret)
-		return ERR_PTR(ret);
+	if (leaf) {
+		btrfs_tree_unlock(leaf);
+		free_extent_buffer(leaf);
+	}
+	kfree(root);
 
-	return root;
+	return ERR_PTR(ret);
 }
 
 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
@@ -3253,7 +3259,7 @@ void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
 	if (btrfs_root_refs(&root->root_item) == 0)
 		synchronize_srcu(&fs_info->subvol_srcu);
 
-	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
 		btrfs_free_log(NULL, root);
 		btrfs_free_log_root_tree(NULL, fs_info);
 	}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9ac2eca681eb..3d551231caba 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -257,7 +257,8 @@ static int exclude_super_stripes(struct btrfs_root *root,
 		cache->bytes_super += stripe_len;
 		ret = add_excluded_extent(root, cache->key.objectid,
 					  stripe_len);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret)
+			return ret;
 	}
 
 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
@@ -265,13 +266,17 @@ static int exclude_super_stripes(struct btrfs_root *root,
 		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
 				       cache->key.objectid, bytenr,
 				       0, &logical, &nr, &stripe_len);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret)
+			return ret;
 
 		while (nr--) {
 			cache->bytes_super += stripe_len;
 			ret = add_excluded_extent(root, logical[nr],
 						  stripe_len);
-			BUG_ON(ret); /* -ENOMEM */
+			if (ret) {
+				kfree(logical);
+				return ret;
+			}
 		}
 
 		kfree(logical);
@@ -4438,7 +4443,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
 	spin_lock(&sinfo->lock);
 	spin_lock(&block_rsv->lock);
 
-	block_rsv->size = num_bytes;
+	block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
 
 	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
 		    sinfo->bytes_reserved + sinfo->bytes_readonly +
@@ -4793,14 +4798,49 @@ out_fail:
 	 * If the inodes csum_bytes is the same as the original
 	 * csum_bytes then we know we haven't raced with any free()ers
 	 * so we can just reduce our inodes csum bytes and carry on.
-	 * Otherwise we have to do the normal free thing to account for
-	 * the case that the free side didn't free up its reserve
-	 * because of this outstanding reservation.
 	 */
-	if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+	if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
 		calc_csum_metadata_size(inode, num_bytes, 0);
-	else
-		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+	} else {
+		u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
+		u64 bytes;
+
+		/*
+		 * This is tricky, but first we need to figure out how much we
+		 * free'd from any free-ers that occured during this
+		 * reservation, so we reset ->csum_bytes to the csum_bytes
+		 * before we dropped our lock, and then call the free for the
+		 * number of bytes that were freed while we were trying our
+		 * reservation.
+		 */
+		bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
+		BTRFS_I(inode)->csum_bytes = csum_bytes;
+		to_free = calc_csum_metadata_size(inode, bytes, 0);
+
+
+		/*
+		 * Now we need to see how much we would have freed had we not
+		 * been making this reservation and our ->csum_bytes were not
+		 * artificially inflated.
+		 */
+		BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
+		bytes = csum_bytes - orig_csum_bytes;
+		bytes = calc_csum_metadata_size(inode, bytes, 0);
+
+		/*
+		 * Now reset ->csum_bytes to what it should be. If bytes is
+		 * more than to_free then we would have free'd more space had we
+		 * not had an artificially high ->csum_bytes, so we need to free
+		 * the remainder. If bytes is the same or less then we don't
+		 * need to do anything, the other free-ers did the correct
+		 * thing.
+		 */
+		BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
+		if (bytes > to_free)
+			to_free = bytes - to_free;
+		else
+			to_free = 0;
+	}
 	spin_unlock(&BTRFS_I(inode)->lock);
 	if (dropped)
 		to_free += btrfs_calc_trans_metadata_size(root, dropped);
@@ -7947,7 +7987,17 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		 * info has super bytes accounted for, otherwise we'll think
 		 * we have more space than we actually do.
 		 */
-		exclude_super_stripes(root, cache);
+		ret = exclude_super_stripes(root, cache);
+		if (ret) {
+			/*
+			 * We may have excluded something, so call this just in
+			 * case.
+			 */
+			free_excluded_extents(root, cache);
+			kfree(cache->free_space_ctl);
+			kfree(cache);
+			goto error;
+		}
 
 		/*
 		 * check for two cases, either we are full, and therefore
@@ -8089,7 +8139,17 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 
 	cache->last_byte_to_unpin = (u64)-1;
 	cache->cached = BTRFS_CACHE_FINISHED;
-	exclude_super_stripes(root, cache);
+	ret = exclude_super_stripes(root, cache);
+	if (ret) {
+		/*
+		 * We may have excluded something, so call this just in
+		 * case.
+		 */
+		free_excluded_extents(root, cache);
+		kfree(cache->free_space_ctl);
+		kfree(cache);
+		return ret;
+	}
 
 	add_new_free_space(cache, root->fs_info, chunk_offset,
 			   chunk_offset + size);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index f173c5af6461..cdee391fc7bf 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1257,6 +1257,39 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 			  GFP_NOFS);
 }
 
+int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
+{
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	struct page *page;
+
+	while (index <= end_index) {
+		page = find_get_page(inode->i_mapping, index);
+		BUG_ON(!page); /* Pages should be in the extent_io_tree */
+		clear_page_dirty_for_io(page);
+		page_cache_release(page);
+		index++;
+	}
+	return 0;
+}
+
+int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
+{
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	struct page *page;
+
+	while (index <= end_index) {
+		page = find_get_page(inode->i_mapping, index);
+		BUG_ON(!page); /* Pages should be in the extent_io_tree */
+		account_page_redirty(page);
+		__set_page_dirty_nobuffers(page);
+		page_cache_release(page);
+		index++;
+	}
+	return 0;
+}
+
 /*
  * helper function to set both pages and extents in the tree writeback
  */
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 6068a1985560..258c92156857 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -325,6 +325,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
 			      unsigned long *map_len);
 int extent_range_uptodate(struct extent_io_tree *tree,
 			  u64 start, u64 end);
+int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
+int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
 int extent_clear_unlock_delalloc(struct inode *inode,
 				struct extent_io_tree *tree,
 				u64 start, u64 end, struct page *locked_page,
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index ec160202be3e..c4628a201cb3 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -118,9 +118,11 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
 		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
 		csums_in_item /= csum_size;
 
-		if (csum_offset >= csums_in_item) {
+		if (csum_offset == csums_in_item) {
 			ret = -EFBIG;
 			goto fail;
+		} else if (csum_offset > csums_in_item) {
+			goto fail;
 		}
 	}
 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
@@ -728,7 +730,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 
 	sector_sum = sums->sums;
-	trans->adding_csums = 1;
again:
 	next_offset = (u64)-1;
 	found_next = 0;
@@ -899,7 +900,6 @@ next_sector:
 		goto again;
 	}
out:
-	trans->adding_csums = 0;
 	btrfs_free_path(path);
 	return ret;
 
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 5b4ea5f55b8f..ade03e6f7bd2 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2142,6 +2142,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 {
 	struct inode *inode = file_inode(file);
 	struct extent_state *cached_state = NULL;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
 	u64 cur_offset;
 	u64 last_byte;
 	u64 alloc_start;
@@ -2169,6 +2170,11 @@ static long btrfs_fallocate(struct file *file, int mode,
 	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
 	if (ret)
 		return ret;
+	if (root->fs_info->quota_enabled) {
+		ret = btrfs_qgroup_reserve(root, alloc_end - alloc_start);
+		if (ret)
+			goto out_reserve_fail;
+	}
 
 	/*
 	 * wait for ordered IO before we have any locks. We'll loop again
@@ -2272,6 +2278,9 @@ static long btrfs_fallocate(struct file *file, int mode,
 			     &cached_state, GFP_NOFS);
out:
 	mutex_unlock(&inode->i_mutex);
+	if (root->fs_info->quota_enabled)
+		btrfs_qgroup_free(root, alloc_end - alloc_start);
+out_reserve_fail:
 	/* Let go of our reservation. */
 	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
 	return ret;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ca1b767d51f7..09c58a35b429 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -353,6 +353,7 @@ static noinline int compress_file_range(struct inode *inode,
 	int i;
 	int will_compress;
 	int compress_type = root->fs_info->compress_type;
+	int redirty = 0;
 
 	/* if this is a small write inside eof, kick off a defrag */
 	if ((end - start + 1) < 16 * 1024 &&
@@ -415,6 +416,17 @@ again:
 		if (BTRFS_I(inode)->force_compress)
 			compress_type = BTRFS_I(inode)->force_compress;
 
+		/*
+		 * we need to call clear_page_dirty_for_io on each
+		 * page in the range. Otherwise applications with the file
+		 * mmap'd can wander in and change the page contents while
+		 * we are compressing them.
+		 *
+		 * If the compression fails for any reason, we set the pages
+		 * dirty again later on.
+		 */
+		extent_range_clear_dirty_for_io(inode, start, end);
+		redirty = 1;
 		ret = btrfs_compress_pages(compress_type,
 					   inode->i_mapping, start,
 					   total_compressed, pages,
@@ -554,6 +566,8 @@ cleanup_and_bail_uncompressed:
 			__set_page_dirty_nobuffers(locked_page);
 			/* unlocked later on in the async handlers */
 		}
+		if (redirty)
+			extent_range_redirty_for_io(inode, start, end);
 		add_async_extent(async_cow, start, end - start + 1,
 				 0, NULL, 0, BTRFS_COMPRESS_NONE);
 		*num_added += 1;
@@ -1743,8 +1757,10 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
 	struct btrfs_ordered_sum *sum;
 
 	list_for_each_entry(sum, list, list) {
+		trans->adding_csums = 1;
 		btrfs_csum_file_blocks(trans,
 		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
+		trans->adding_csums = 0;
 	}
 	return 0;
 }
@@ -3679,11 +3695,9 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
 	 * 1 for the dir item
 	 * 1 for the dir index
 	 * 1 for the inode ref
-	 * 1 for the inode ref in the tree log
-	 * 2 for the dir entries in the log
 	 * 1 for the inode
 	 */
-	trans = btrfs_start_transaction(root, 8);
+	trans = btrfs_start_transaction(root, 5);
 	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
 		return trans;
 
@@ -8127,7 +8141,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
 	 * should cover the worst case number of items we'll modify.
 	 */
-	trans = btrfs_start_transaction(root, 20);
+	trans = btrfs_start_transaction(root, 11);
 	if (IS_ERR(trans)) {
 		ret = PTR_ERR(trans);
 		goto out_notrans;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index dc08d77b717e..005c45db699e 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -557,6 +557,7 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
 	INIT_LIST_HEAD(&splice);
 	INIT_LIST_HEAD(&works);
 
+	mutex_lock(&root->fs_info->ordered_operations_mutex);
 	spin_lock(&root->fs_info->ordered_extent_lock);
 	list_splice_init(&root->fs_info->ordered_extents, &splice);
 	while (!list_empty(&splice)) {
@@ -600,6 +601,7 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
 
 		cond_resched();
 	}
+	mutex_unlock(&root->fs_info->ordered_operations_mutex);
 }
 
 /*
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5471e47d6559..b44124dd2370 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1153,7 +1153,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 	ret = btrfs_find_all_roots(trans, fs_info, node->bytenr,
 				   sgn > 0 ? node->seq - 1 : node->seq, &roots);
 	if (ret < 0)
-		goto out;
+		return ret;
 
 	spin_lock(&fs_info->qgroup_lock);
 	quota_root = fs_info->quota_root;
@@ -1275,7 +1275,6 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 	ret = 0;
unlock:
 	spin_unlock(&fs_info->qgroup_lock);
-out:
 	ulist_free(roots);
 	ulist_free(tmp);
 
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 53c3501fa4ca..85e072b956d5 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -542,7 +542,6 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
 	eb = path->nodes[0];
 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
-	btrfs_release_path(path);
 
 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 		do {
@@ -558,7 +557,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
 				ret < 0 ? -1 : ref_level,
 				ret < 0 ? -1 : ref_root);
 		} while (ret != 1);
+		btrfs_release_path(path);
 	} else {
+		btrfs_release_path(path);
 		swarn.path = path;
 		swarn.dev = dev;
 		iterate_extent_inodes(fs_info, found_key.objectid,
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index f7a8b861058b..c85e7c6b4598 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -3945,12 +3945,10 @@ static int is_extent_unchanged(struct send_ctx *sctx,
 		    found_key.type != key.type) {
 			key.offset += right_len;
 			break;
-		} else {
-			if (found_key.offset != key.offset + right_len) {
-				/* Should really not happen */
-				ret = -EIO;
-				goto out;
-			}
+		}
+		if (found_key.offset != key.offset + right_len) {
+			ret = 0;
+			goto out;
 		}
 		key = found_key;
 	}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 451fad96ecd1..ef96381569a4 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -317,6 +317,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
 	unsigned long src_ptr;
 	unsigned long dst_ptr;
 	int overwrite_root = 0;
+	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
 
 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
 		overwrite_root = 1;
@@ -326,6 +327,9 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
 
 	/* look for the key in the destination tree */
 	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+	if (ret < 0)
+		return ret;
+
 	if (ret == 0) {
 		char *src_copy;
 		char *dst_copy;
@@ -367,6 +371,30 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
 			return 0;
 		}
 
+		/*
+		 * We need to load the old nbytes into the inode so when we
+		 * replay the extents we've logged we get the right nbytes.
+		 */
+		if (inode_item) {
+			struct btrfs_inode_item *item;
+			u64 nbytes;
+
+			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+					      struct btrfs_inode_item);
+			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
+			item = btrfs_item_ptr(eb, slot,
+					      struct btrfs_inode_item);
+			btrfs_set_inode_nbytes(eb, item, nbytes);
+		}
+	} else if (inode_item) {
+		struct btrfs_inode_item *item;
+
+		/*
+		 * New inode, set nbytes to 0 so that the nbytes comes out
+		 * properly when we replay the extents.
+		 */
+		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
+		btrfs_set_inode_nbytes(eb, item, 0);
 	}
insert:
 	btrfs_release_path(path);
@@ -486,7 +514,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 	int found_type;
 	u64 extent_end;
 	u64 start = key->offset;
-	u64 saved_nbytes;
+	u64 nbytes = 0;
 	struct btrfs_file_extent_item *item;
 	struct inode *inode = NULL;
 	unsigned long size;
@@ -496,10 +524,19 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 	found_type = btrfs_file_extent_type(eb, item);
 
 	if (found_type == BTRFS_FILE_EXTENT_REG ||
-	    found_type == BTRFS_FILE_EXTENT_PREALLOC)
-		extent_end = start + btrfs_file_extent_num_bytes(eb, item);
-	else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+		nbytes = btrfs_file_extent_num_bytes(eb, item);
+		extent_end = start + nbytes;
+
+		/*
+		 * We don't add to the inodes nbytes if we are prealloc or a
+		 * hole.
+		 */
+		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
+			nbytes = 0;
+	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
 		size = btrfs_file_extent_inline_len(eb, item);
+		nbytes = btrfs_file_extent_ram_bytes(eb, item);
 		extent_end = ALIGN(start + size, root->sectorsize);
 	} else {
 		ret = 0;
@@ -548,7 +585,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 	}
 	btrfs_release_path(path);
 
-	saved_nbytes = inode_get_bytes(inode);
 	/* drop any overlapping extents */
 	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
 	BUG_ON(ret);
@@ -635,7 +671,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 		BUG_ON(ret);
 	}
 
-	inode_set_bytes(inode, saved_nbytes);
+	inode_add_bytes(inode, nbytes);
 	ret = btrfs_update_inode(trans, root, inode);
out:
 	if (inode)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5989a92236f7..2854c824ab64 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4935,7 +4935,18 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 	em = lookup_extent_mapping(em_tree, chunk_start, 1);
 	read_unlock(&em_tree->lock);
 
-	BUG_ON(!em || em->start != chunk_start);
+	if (!em) {
+		printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n",
+		       chunk_start);
+		return -EIO;
+	}
+
+	if (em->start != chunk_start) {
+		printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n",
+		       em->start, chunk_start);
+		free_extent_map(em);
+		return -EIO;
+	}
 	map = (struct map_lookup *)em->bdev;
 
 	length = em->len;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 991c63c6bdd0..21b3a291c327 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1575,14 +1575,24 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
 		}
 		break;
 	case Opt_blank_pass:
-		vol->password = NULL;
-		break;
-	case Opt_pass:
 		/* passwords have to be handled differently
 		 * to allow the character used for deliminator
 		 * to be passed within them
 		 */
 
+		/*
+		 * Check if this is a case where the password
+		 * starts with a delimiter
+		 */
+		tmp_end = strchr(data, '=');
+		tmp_end++;
+		if (!(tmp_end < end && tmp_end[1] == delim)) {
+			/* No it is not. Set the password to NULL */
+			vol->password = NULL;
+			break;
+		}
+		/* Yes it is. Drop down to Opt_pass below.*/
+	case Opt_pass:
 		/* Obtain the value string */
 		value = strchr(data, '=');
 		value++;
diff --git a/fs/dcache.c b/fs/dcache.c
index fbfae008ba44..e8bc3420d63e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2542,7 +2542,6 @@ static int prepend_path(const struct path *path,
 	bool slash = false;
 	int error = 0;
 
-	br_read_lock(&vfsmount_lock);
 	while (dentry != root->dentry || vfsmnt != root->mnt) {
 		struct dentry * parent;
 
@@ -2572,8 +2571,6 @@ static int prepend_path(const struct path *path,
 	if (!error && !slash)
 		error = prepend(buffer, buflen, "/", 1);
 
-out:
-	br_read_unlock(&vfsmount_lock);
 	return error;
 
global_root:
@@ -2590,7 +2587,7 @@ global_root:
 		error = prepend(buffer, buflen, "/", 1);
 	if (!error)
 		error = is_mounted(vfsmnt) ? 1 : 2;
-	goto out;
+	return error;
 }
 
 /**
@@ -2617,9 +2614,11 @@ char *__d_path(const struct path *path,
 	int error;
 
 	prepend(&res, &buflen, "\0", 1);
+	br_read_lock(&vfsmount_lock);
 	write_seqlock(&rename_lock);
 	error = prepend_path(path, root, &res, &buflen);
 	write_sequnlock(&rename_lock);
+	br_read_unlock(&vfsmount_lock);
 
 	if (error < 0)
 		return ERR_PTR(error);
@@ -2636,9 +2635,11 @@ char *d_absolute_path(const struct path *path,
 	int error;
 
 	prepend(&res, &buflen, "\0", 1);
+	br_read_lock(&vfsmount_lock);
 	write_seqlock(&rename_lock);
 	error = prepend_path(path, &root, &res, &buflen);
 	write_sequnlock(&rename_lock);
+	br_read_unlock(&vfsmount_lock);
 
 	if (error > 1)
 		error = -EINVAL;
@@ -2702,11 +2703,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
 		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
 
 	get_fs_root(current->fs, &root);
+	br_read_lock(&vfsmount_lock);
 	write_seqlock(&rename_lock);
 	error = path_with_deleted(path, &root, &res, &buflen);
+	write_sequnlock(&rename_lock);
+	br_read_unlock(&vfsmount_lock);
 	if (error < 0)
 		res = ERR_PTR(error);
-	write_sequnlock(&rename_lock);
 	path_put(&root);
 	return res;
 }
@@ -2830,6 +2833,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
 	get_fs_root_and_pwd(current->fs, &root, &pwd);
 
 	error = -ENOENT;
+	br_read_lock(&vfsmount_lock);
 	write_seqlock(&rename_lock);
 	if (!d_unlinked(pwd.dentry)) {
 		unsigned long len;
@@ -2839,6 +2843,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
 		prepend(&cwd, &buflen, "\0", 1);
 		error = prepend_path(&pwd, &root, &cwd, &buflen);
 		write_sequnlock(&rename_lock);
+		br_read_unlock(&vfsmount_lock);
 
 		if (error < 0)
 			goto out;
@@ -2859,6 +2864,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
 		}
 	} else {
 		write_sequnlock(&rename_lock);
+		br_read_unlock(&vfsmount_lock);
 	}
 
out:
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index 412e6eda25f8..e4141f257495 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -80,13 +80,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
 	int rc;
 
 	mutex_lock(&ecryptfs_daemon_hash_mux);
-	rc = try_module_get(THIS_MODULE);
-	if (rc == 0) {
-		rc = -EIO;
-		printk(KERN_ERR "%s: Error attempting to increment module use "
-		       "count; rc = [%d]\n", __func__, rc);
-		goto out_unlock_daemon_list;
-	}
 	rc = ecryptfs_find_daemon_by_euid(&daemon);
 	if (!rc) {
 		rc = -EINVAL;
@@ -96,7 +89,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
 	if (rc) {
 		printk(KERN_ERR "%s: Error attempting to spawn daemon; "
 		       "rc = [%d]\n", __func__, rc);
-		goto out_module_put_unlock_daemon_list;
+		goto out_unlock_daemon_list;
 	}
 	mutex_lock(&daemon->mux);
 	if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) {
@@ -108,9 +101,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
 	atomic_inc(&ecryptfs_num_miscdev_opens);
out_unlock_daemon:
 	mutex_unlock(&daemon->mux);
-out_module_put_unlock_daemon_list:
-	if (rc)
-		module_put(THIS_MODULE);
out_unlock_daemon_list:
 	mutex_unlock(&ecryptfs_daemon_hash_mux);
 	return rc;
@@ -147,7 +137,6 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
 		       "bug.\n", __func__, rc);
 		BUG();
 	}
-	module_put(THIS_MODULE);
 	return rc;
 }
 
@@ -471,6 +460,7 @@ out_free:
 
 
 static const struct file_operations ecryptfs_miscdev_fops = {
+	.owner = THIS_MODULE,
 	.open = ecryptfs_miscdev_open,
 	.poll = ecryptfs_miscdev_poll,
 	.read = ecryptfs_miscdev_read,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 56efcaadf848..9c6d06dcef8b 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2999,20 +2999,23 @@ static int ext4_split_extent_at(handle_t *handle,
 		if (split_flag & EXT4_EXT_DATA_VALID1) {
 			err = ext4_ext_zeroout(inode, ex2);
 			zero_ex.ee_block = ex2->ee_block;
-			zero_ex.ee_len = ext4_ext_get_actual_len(ex2);
+			zero_ex.ee_len = cpu_to_le16(
+						ext4_ext_get_actual_len(ex2));
 			ext4_ext_store_pblock(&zero_ex,
 					      ext4_ext_pblock(ex2));
 		} else {
 			err = ext4_ext_zeroout(inode, ex);
 			zero_ex.ee_block = ex->ee_block;
-			zero_ex.ee_len = ext4_ext_get_actual_len(ex);
+			zero_ex.ee_len = cpu_to_le16(
+						ext4_ext_get_actual_len(ex));
 			ext4_ext_store_pblock(&zero_ex,
 					      ext4_ext_pblock(ex));
 		}
 	} else {
 		err = ext4_ext_zeroout(inode, &orig_ex);
 		zero_ex.ee_block = orig_ex.ee_block;
-		zero_ex.ee_len = ext4_ext_get_actual_len(&orig_ex);
+		zero_ex.ee_len = cpu_to_le16(
+					ext4_ext_get_actual_len(&orig_ex));
 		ext4_ext_store_pblock(&zero_ex,
 				      ext4_ext_pblock(&orig_ex));
 	}
@@ -3272,7 +3275,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 		if (err)
 			goto out;
 		zero_ex.ee_block = ex->ee_block;
-		zero_ex.ee_len = ext4_ext_get_actual_len(ex);
+		zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
 		ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
 
 		err = ext4_ext_get_access(handle, inode, path + depth);
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index b505a145a593..a04183127ef0 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -1539,9 +1539,9 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
 		blk = *i_data;
 		if (level > 0) {
 			ext4_lblk_t first2;
-			bh = sb_bread(inode->i_sb, blk);
+			bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
 			if (!bh) {
-				EXT4_ERROR_INODE_BLOCK(inode, blk,
+				EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
 						       "Read failure");
 				return -EIO;
 			}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 019f45e45097..d79c2dadc536 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -923,8 +923,11 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 		cmd = F_SETLK;
 		fl->fl_type = F_UNLCK;
 	}
-	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
+		if (fl->fl_type == F_UNLCK)
+			posix_lock_file_wait(file, fl);
 		return -EIO;
+	}
 	if (IS_GETLK(cmd))
 		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
 	else if (fl->fl_type == F_UNLCK)
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 156e42ec84ea..5c29216e9cc1 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -588,6 +588,7 @@ struct lm_lockstruct {
 	struct dlm_lksb ls_control_lksb; /* control_lock */
 	char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */
 	struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
+	char *ls_lvb_bits;
 
 	spinlock_t ls_recover_spin; /* protects following fields */
 	unsigned long ls_recover_flags;	/* DFL_ */
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 9802de0f85e6..c8423d6de6c3 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -483,12 +483,8 @@ static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
 
 static int all_jid_bits_clear(char *lvb)
 {
-	int i;
-	for (i = JID_BITMAP_OFFSET; i < GDLM_LVB_SIZE; i++) {
-		if (lvb[i])
-			return 0;
-	}
-	return 1;
+	return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0,
+			   GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
 }
 
 static void sync_wait_cb(void *arg)
@@ -580,7 +576,6 @@ static void gfs2_control_func(struct work_struct *work)
 {
 	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
-	char lvb_bits[GDLM_LVB_SIZE];
 	uint32_t block_gen, start_gen, lvb_gen, flags;
 	int recover_set = 0;
 	int write_lvb = 0;
@@ -634,7 +629,7 @@ static void gfs2_control_func(struct work_struct *work)
 		return;
 	}
 
-	control_lvb_read(ls, &lvb_gen, lvb_bits);
+	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);
 
 	spin_lock(&ls->ls_recover_spin);
 	if (block_gen != ls->ls_recover_block ||
@@ -664,10 +659,10 @@ static void gfs2_control_func(struct work_struct *work)
 
 			ls->ls_recover_result[i] = 0;
 
-			if (!test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET))
+			if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
 				continue;
 
-			__clear_bit_le(i, lvb_bits + JID_BITMAP_OFFSET);
+			__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
 			write_lvb = 1;
 		}
 	}
@@ -691,7 +686,7 @@ static void gfs2_control_func(struct work_struct *work)
 				continue;
 			if (ls->ls_recover_submit[i] < start_gen) {
 				ls->ls_recover_submit[i] = 0;
-				__set_bit_le(i, lvb_bits + JID_BITMAP_OFFSET);
+				__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
 			}
 		}
 		/* even if there are no bits to set, we need to write the
@@ -705,7 +700,7 @@ static void gfs2_control_func(struct work_struct *work)
 	spin_unlock(&ls->ls_recover_spin);
 
 	if (write_lvb) {
-		control_lvb_write(ls, start_gen, lvb_bits);
+		control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
 		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
 	} else {
 		flags = DLM_LKF_CONVERT;
@@ -725,7 +720,7 @@ static void gfs2_control_func(struct work_struct *work)
 	 */
 
 	for (i = 0; i < recover_size; i++) {
-		if (test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) {
+		if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
 			fs_info(sdp, "recover generation %u jid %d\n",
 				start_gen, i);
 			gfs2_recover_set(sdp, i);
@@ -758,7 +753,6 @@ static void gfs2_control_func(struct work_struct *work)
 static int control_mount(struct gfs2_sbd *sdp)
 {
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
-	char lvb_bits[GDLM_LVB_SIZE];
 	uint32_t start_gen, block_gen, mount_gen, lvb_gen;
 	int mounted_mode;
 	int retries = 0;
@@ -857,7 +851,7 @@ locks_done:
 	 * lvb_gen will be non-zero.
 	 */
 
-	control_lvb_read(ls, &lvb_gen, lvb_bits);
+	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);
 
 	if (lvb_gen == 0xFFFFFFFF) {
 		/* special value to force mount attempts to fail */
@@ -887,7 +881,7 @@ locks_done:
 	 * and all lvb bits to be clear (no pending journal recoveries.)
 	 */
 
-	if (!all_jid_bits_clear(lvb_bits)) {
+	if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
 		/* journals need recovery, wait until all are clear */
 		fs_info(sdp, "control_mount wait for journal recovery\n");
 		goto restart;
@@ -949,7 +943,6 @@ static int dlm_recovery_wait(void *word)
 static int control_first_done(struct gfs2_sbd *sdp)
 {
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
-	char lvb_bits[GDLM_LVB_SIZE];
 	uint32_t start_gen, block_gen;
 	int error;
 
@@ -991,8 +984,8 @@ restart:
 	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
 	spin_unlock(&ls->ls_recover_spin);
 
-	memset(lvb_bits, 0, sizeof(lvb_bits));
-	control_lvb_write(ls, start_gen, lvb_bits);
+	memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE);
+	control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
 
 	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);
 	if (error)
@@ -1022,6 +1015,12 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
 	uint32_t old_size, new_size;
 	int i, max_jid;
 
+	if (!ls->ls_lvb_bits) {
+		ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
+		if (!ls->ls_lvb_bits)
+			return -ENOMEM;
+	}
+
 	max_jid = 0;
 	for (i = 0; i < num_slots; i++) {
 		if (max_jid < slots[i].slot - 1)
@@ -1057,6 +1056,7 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
 
 static void free_recover_size(struct lm_lockstruct *ls)
 {
+	kfree(ls->ls_lvb_bits);
 	kfree(ls->ls_recover_submit);
 	kfree(ls->ls_recover_result);
 	ls->ls_recover_submit = NULL;
@@ -1205,6 +1205,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
 	ls->ls_recover_size = 0;
 	ls->ls_recover_submit = NULL;
 	ls->ls_recover_result = NULL;
+	ls->ls_lvb_bits = NULL;
 
 	error = set_recover_size(sdp, NULL, 0);
 	if (error)
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index d1f51fd73f86..5a51265a4341 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -576,7 +576,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip)
 	RB_CLEAR_NODE(&ip->i_res->rs_node);
out:
 	up_write(&ip->i_rw_mutex);
-	return 0;
+	return error;
 }
 
 static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
@@ -1181,12 +1181,9 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
 			     const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
 {
 	struct super_block *sb = sdp->sd_vfs;
-	struct block_device *bdev = sb->s_bdev;
-	const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
-					   bdev_logical_block_size(sb->s_bdev);
 	u64 blk;
 	sector_t start = 0;
-	sector_t nr_sects = 0;
+	sector_t nr_blks = 0;
 	int rv;
 	unsigned int x;
 	u32 trimmed = 0;
@@ -1206,35 +1203,34 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
 		if (diff == 0)
 			continue;
 		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
-		blk *= sects_per_blk; /* convert to sectors */
 		while(diff) {
 			if (diff & 1) {
-				if (nr_sects == 0)
+				if (nr_blks == 0)
 					goto start_new_extent;
-				if ((start + nr_sects) != blk) {
-					if (nr_sects >= minlen) {
-						rv = blkdev_issue_discard(bdev,
-							start, nr_sects,
+				if ((start + nr_blks) != blk) {
+					if (nr_blks >= minlen) {
+						rv = sb_issue_discard(sb,
+							start, nr_blks,
 							GFP_NOFS, 0);
 						if (rv)
 							goto fail;
-						trimmed += nr_sects;
+						trimmed += nr_blks;
 					}
-					nr_sects = 0;
+					nr_blks = 0;
start_new_extent:
 					start = blk;
 				}
-				nr_sects += sects_per_blk;
+				nr_blks++;
 			}
 			diff >>= 2;
-			blk += sects_per_blk;
+			blk++;
 		}
 	}
-	if (nr_sects >= minlen) {
-		rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
+	if (nr_blks >= minlen) {
+		rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
 		if (rv)
 			goto fail;
-		trimmed += nr_sects;
+		trimmed += nr_blks;
 	}
 	if (ptrimmed)
 		*ptrimmed = trimmed;
diff --git a/fs/inode.c b/fs/inode.c
index f5f7c06c36fb..a898b3d43ccf 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -725,7 +725,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
 		 * inode to the back of the list so we don't spin on it.
 		 */
 		if (!spin_trylock(&inode->i_lock)) {
-			list_move_tail(&inode->i_lru, &sb->s_inode_lru);
+			list_move(&inode->i_lru, &sb->s_inode_lru);
 			continue;
 		}
 
diff --git a/fs/internal.h b/fs/internal.h
index 507141fceb99..4be78237d896 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -125,3 +125,8 @@ extern int invalidate_inodes(struct super_block *, bool);
125 * dcache.c 125 * dcache.c
126 */ 126 */
127extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); 127extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
128
129/*
130 * read_write.c
131 */
132extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
diff --git a/fs/namespace.c b/fs/namespace.c
index 50ca17d3cb45..341d3f564082 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -798,6 +798,10 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
798 } 798 }
799 799
800 mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD; 800 mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
801 /* Don't allow unprivileged users to change mount flags */
802 if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
803 mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
804
801 atomic_inc(&sb->s_active); 805 atomic_inc(&sb->s_active);
802 mnt->mnt.mnt_sb = sb; 806 mnt->mnt.mnt_sb = sb;
803 mnt->mnt.mnt_root = dget(root); 807 mnt->mnt.mnt_root = dget(root);
@@ -1686,7 +1690,7 @@ static int do_loopback(struct path *path, const char *old_name,
1686 1690
1687 if (IS_ERR(mnt)) { 1691 if (IS_ERR(mnt)) {
1688 err = PTR_ERR(mnt); 1692 err = PTR_ERR(mnt);
1689 goto out; 1693 goto out2;
1690 } 1694 }
1691 1695
1692 err = graft_tree(mnt, path); 1696 err = graft_tree(mnt, path);
@@ -1713,6 +1717,9 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
1713 if (readonly_request == __mnt_is_readonly(mnt)) 1717 if (readonly_request == __mnt_is_readonly(mnt))
1714 return 0; 1718 return 0;
1715 1719
1720 if (mnt->mnt_flags & MNT_LOCK_READONLY)
1721 return -EPERM;
1722
1716 if (readonly_request) 1723 if (readonly_request)
1717 error = mnt_make_readonly(real_mount(mnt)); 1724 error = mnt_make_readonly(real_mount(mnt));
1718 else 1725 else
@@ -2339,7 +2346,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
2339 /* First pass: copy the tree topology */ 2346 /* First pass: copy the tree topology */
2340 copy_flags = CL_COPY_ALL | CL_EXPIRE; 2347 copy_flags = CL_COPY_ALL | CL_EXPIRE;
2341 if (user_ns != mnt_ns->user_ns) 2348 if (user_ns != mnt_ns->user_ns)
2342 copy_flags |= CL_SHARED_TO_SLAVE; 2349 copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
2343 new = copy_tree(old, old->mnt.mnt_root, copy_flags); 2350 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
2344 if (IS_ERR(new)) { 2351 if (IS_ERR(new)) {
2345 up_write(&namespace_sem); 2352 up_write(&namespace_sem);
@@ -2732,6 +2739,51 @@ bool our_mnt(struct vfsmount *mnt)
2732 return check_mnt(real_mount(mnt)); 2739 return check_mnt(real_mount(mnt));
2733} 2740}
2734 2741
2742bool current_chrooted(void)
2743{
2744 /* Does the current process have a non-standard root */
2745 struct path ns_root;
2746 struct path fs_root;
2747 bool chrooted;
2748
2749 /* Find the namespace root */
2750 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
2751 ns_root.dentry = ns_root.mnt->mnt_root;
2752 path_get(&ns_root);
2753 while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
2754 ;
2755
2756 get_fs_root(current->fs, &fs_root);
2757
2758 chrooted = !path_equal(&fs_root, &ns_root);
2759
2760 path_put(&fs_root);
2761 path_put(&ns_root);
2762
2763 return chrooted;
2764}
2765
2766void update_mnt_policy(struct user_namespace *userns)
2767{
2768 struct mnt_namespace *ns = current->nsproxy->mnt_ns;
2769 struct mount *mnt;
2770
2771 down_read(&namespace_sem);
2772 list_for_each_entry(mnt, &ns->list, mnt_list) {
2773 switch (mnt->mnt.mnt_sb->s_magic) {
2774 case SYSFS_MAGIC:
2775 userns->may_mount_sysfs = true;
2776 break;
2777 case PROC_SUPER_MAGIC:
2778 userns->may_mount_proc = true;
2779 break;
2780 }
2781 if (userns->may_mount_sysfs && userns->may_mount_proc)
2782 break;
2783 }
2784 up_read(&namespace_sem);
2785}
2786
2735static void *mntns_get(struct task_struct *task) 2787static void *mntns_get(struct task_struct *task)
2736{ 2788{
2737 struct mnt_namespace *ns = NULL; 2789 struct mnt_namespace *ns = NULL;
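
Taken together, the namespace.c changes above lock down read-only mounts that propagate into a less privileged user namespace: clone_mnt() stamps MNT_LOCK_READONLY when cloning with CL_UNPRIVILEGED, and change_mount_flags() then refuses to clear read-only on such a mount. A small stand-alone sketch of that rule follows; struct mountpoint and the FLAG_* values are made up for the demo and are not the kernel's mount flags:

    #include <stdio.h>
    #include <errno.h>

    /* Illustrative flag values and struct; not the kernel's MNT_* bits. */
    #define FLAG_READONLY       0x01
    #define FLAG_LOCK_READONLY  0x02

    struct mountpoint {
        unsigned int flags;
    };

    /*
     * Mirrors the change_mount_flags() rule above: once a mount reaches a
     * less privileged namespace with the read-only lock set, a remount may
     * keep it read-only but may not make it writable.
     */
    static int set_readonly(struct mountpoint *m, int readonly_request)
    {
        int currently_ro = !!(m->flags & FLAG_READONLY);

        if (readonly_request == currently_ro)
            return 0;                  /* nothing to change */
        if (m->flags & FLAG_LOCK_READONLY)
            return -EPERM;             /* locked by the parent namespace */
        if (readonly_request)
            m->flags |= FLAG_READONLY;
        else
            m->flags &= ~FLAG_READONLY;
        return 0;
    }

    int main(void)
    {
        struct mountpoint locked = { FLAG_READONLY | FLAG_LOCK_READONLY };
        struct mountpoint plain  = { FLAG_READONLY };

        printf("remount rw, plain:  %d\n", set_readonly(&plain, 0));   /* 0      */
        printf("remount rw, locked: %d\n", set_readonly(&locked, 0));  /* -EPERM */
        return 0;
    }

The equality check runs first on purpose: remounting a locked mount read-only again still succeeds; only the transition to writable is refused.
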
diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c
index 737d839bc17b..6fc7b5cae92b 100644
--- a/fs/nfs/blocklayout/blocklayoutdm.c
+++ b/fs/nfs/blocklayout/blocklayoutdm.c
@@ -55,7 +55,8 @@ static void dev_remove(struct net *net, dev_t dev)
55 55
56 bl_pipe_msg.bl_wq = &nn->bl_wq; 56 bl_pipe_msg.bl_wq = &nn->bl_wq;
57 memset(msg, 0, sizeof(*msg)); 57 memset(msg, 0, sizeof(*msg));
58 msg->data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS); 58 msg->len = sizeof(bl_msg) + bl_msg.totallen;
59 msg->data = kzalloc(msg->len, GFP_NOFS);
59 if (!msg->data) 60 if (!msg->data)
60 goto out; 61 goto out;
61 62
@@ -66,7 +67,6 @@ static void dev_remove(struct net *net, dev_t dev)
66 memcpy(msg->data, &bl_msg, sizeof(bl_msg)); 67 memcpy(msg->data, &bl_msg, sizeof(bl_msg));
67 dataptr = (uint8_t *) msg->data; 68 dataptr = (uint8_t *) msg->data;
68 memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request)); 69 memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request));
69 msg->len = sizeof(bl_msg) + bl_msg.totallen;
70 70
71 add_wait_queue(&nn->bl_wq, &wq); 71 add_wait_queue(&nn->bl_wq, &wq);
72 if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) { 72 if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) {
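
The blocklayoutdm fix above computes msg->len before the allocation and then allocates exactly that many bytes, so the buffer is always sized by the same arithmetic that later drives the memcpy()s. A stand-alone sketch of that pattern, using an illustrative header layout rather than the NFS pipe upcall format:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    /* Illustrative message layout, not the NFS upcall message. */
    struct msg_header {
        uint32_t type;
        uint32_t totallen;   /* bytes of payload following the header */
    };

    int main(void)
    {
        const char payload[] = "payload bytes";
        struct msg_header hdr = { .type = 1, .totallen = sizeof(payload) };

        /* Size the allocation from the same numbers the header advertises,
         * so the buffer can never be smaller than what gets copied in. */
        size_t len = sizeof(hdr) + hdr.totallen;
        unsigned char *buf = malloc(len);

        if (!buf)
            return 1;
        memcpy(buf, &hdr, sizeof(hdr));
        memcpy(buf + sizeof(hdr), payload, hdr.totallen);

        printf("allocated %zu bytes (header %zu + payload %u)\n",
               len, sizeof(hdr), (unsigned)hdr.totallen);
        free(buf);
        return 0;
    }
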
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index dc0f98dfa717..c516da5873fd 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -726,9 +726,9 @@ out1:
726 return ret; 726 return ret;
727} 727}
728 728
729static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data) 729static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data, size_t datalen)
730{ 730{
731 return key_instantiate_and_link(key, data, strlen(data) + 1, 731 return key_instantiate_and_link(key, data, datalen,
732 id_resolver_cache->thread_keyring, 732 id_resolver_cache->thread_keyring,
733 authkey); 733 authkey);
734} 734}
@@ -738,6 +738,7 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
738 struct key *key, struct key *authkey) 738 struct key *key, struct key *authkey)
739{ 739{
740 char id_str[NFS_UINT_MAXLEN]; 740 char id_str[NFS_UINT_MAXLEN];
741 size_t len;
741 int ret = -ENOKEY; 742 int ret = -ENOKEY;
742 743
743 /* ret = -ENOKEY */ 744 /* ret = -ENOKEY */
@@ -747,13 +748,15 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
747 case IDMAP_CONV_NAMETOID: 748 case IDMAP_CONV_NAMETOID:
748 if (strcmp(upcall->im_name, im->im_name) != 0) 749 if (strcmp(upcall->im_name, im->im_name) != 0)
749 break; 750 break;
750 sprintf(id_str, "%d", im->im_id); 751 /* Note: here we store the NUL terminator too */
751 ret = nfs_idmap_instantiate(key, authkey, id_str); 752 len = sprintf(id_str, "%d", im->im_id) + 1;
753 ret = nfs_idmap_instantiate(key, authkey, id_str, len);
752 break; 754 break;
753 case IDMAP_CONV_IDTONAME: 755 case IDMAP_CONV_IDTONAME:
754 if (upcall->im_id != im->im_id) 756 if (upcall->im_id != im->im_id)
755 break; 757 break;
756 ret = nfs_idmap_instantiate(key, authkey, im->im_name); 758 len = strlen(im->im_name);
759 ret = nfs_idmap_instantiate(key, authkey, im->im_name, len);
757 break; 760 break;
758 default: 761 default:
759 ret = -EINVAL; 762 ret = -EINVAL;
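
nfs_idmap_instantiate() now takes an explicit payload length instead of calling strlen() on the data: the numeric id reply deliberately includes its NUL terminator, while the name reply does not, and only the caller knows which is which. A small demo of computing the two lengths; instantiate() is just a stand-in for key_instantiate_and_link():

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for key_instantiate_and_link(): report the length used. */
    static void instantiate(const char *what, const char *data, size_t datalen)
    {
        printf("%s: %zu byte payload (\"%s\")\n", what, datalen, data);
    }

    int main(void)
    {
        char id_str[16];
        const char *name = "alice@example";
        size_t len;

        /* Numeric id: the NUL terminator is part of the payload, so the
         * length is sprintf()'s return value plus one. */
        len = sprintf(id_str, "%d", 1000) + 1;
        instantiate("name -> id reply (numeric string)", id_str, len);

        /* Name: stored without a trailing NUL, so plain strlen(). */
        len = strlen(name);
        instantiate("id -> name reply (name)", name, len);
        return 0;
    }
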
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index ac4fc9a8fdbc..66b6664dcd4c 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -300,7 +300,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
300 struct rpc_cred *cred) 300 struct rpc_cred *cred)
301{ 301{
302 struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); 302 struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
303 struct nfs_client *pos, *n, *prev = NULL; 303 struct nfs_client *pos, *prev = NULL;
304 struct nfs4_setclientid_res clid = { 304 struct nfs4_setclientid_res clid = {
305 .clientid = new->cl_clientid, 305 .clientid = new->cl_clientid,
306 .confirm = new->cl_confirm, 306 .confirm = new->cl_confirm,
@@ -308,10 +308,23 @@ int nfs40_walk_client_list(struct nfs_client *new,
308 int status = -NFS4ERR_STALE_CLIENTID; 308 int status = -NFS4ERR_STALE_CLIENTID;
309 309
310 spin_lock(&nn->nfs_client_lock); 310 spin_lock(&nn->nfs_client_lock);
311 list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { 311 list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
312 /* If "pos" isn't marked ready, we can't trust the 312 /* If "pos" isn't marked ready, we can't trust the
313 * remaining fields in "pos" */ 313 * remaining fields in "pos" */
314 if (pos->cl_cons_state < NFS_CS_READY) 314 if (pos->cl_cons_state > NFS_CS_READY) {
315 atomic_inc(&pos->cl_count);
316 spin_unlock(&nn->nfs_client_lock);
317
318 if (prev)
319 nfs_put_client(prev);
320 prev = pos;
321
322 status = nfs_wait_client_init_complete(pos);
323 spin_lock(&nn->nfs_client_lock);
324 if (status < 0)
325 continue;
326 }
327 if (pos->cl_cons_state != NFS_CS_READY)
315 continue; 328 continue;
316 329
317 if (pos->rpc_ops != new->rpc_ops) 330 if (pos->rpc_ops != new->rpc_ops)
@@ -423,16 +436,16 @@ int nfs41_walk_client_list(struct nfs_client *new,
423 struct rpc_cred *cred) 436 struct rpc_cred *cred)
424{ 437{
425 struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); 438 struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
426 struct nfs_client *pos, *n, *prev = NULL; 439 struct nfs_client *pos, *prev = NULL;
427 int status = -NFS4ERR_STALE_CLIENTID; 440 int status = -NFS4ERR_STALE_CLIENTID;
428 441
429 spin_lock(&nn->nfs_client_lock); 442 spin_lock(&nn->nfs_client_lock);
430 list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { 443 list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
431 /* If "pos" isn't marked ready, we can't trust the 444 /* If "pos" isn't marked ready, we can't trust the
432 * remaining fields in "pos", especially the client 445 * remaining fields in "pos", especially the client
433 * ID and serverowner fields. Wait for CREATE_SESSION 446 * ID and serverowner fields. Wait for CREATE_SESSION
434 * to finish. */ 447 * to finish. */
435 if (pos->cl_cons_state < NFS_CS_READY) { 448 if (pos->cl_cons_state > NFS_CS_READY) {
436 atomic_inc(&pos->cl_count); 449 atomic_inc(&pos->cl_count);
437 spin_unlock(&nn->nfs_client_lock); 450 spin_unlock(&nn->nfs_client_lock);
438 451
@@ -440,18 +453,17 @@ int nfs41_walk_client_list(struct nfs_client *new,
440 nfs_put_client(prev); 453 nfs_put_client(prev);
441 prev = pos; 454 prev = pos;
442 455
443 nfs4_schedule_lease_recovery(pos);
444 status = nfs_wait_client_init_complete(pos); 456 status = nfs_wait_client_init_complete(pos);
445 if (status < 0) { 457 if (status == 0) {
446 nfs_put_client(pos); 458 nfs4_schedule_lease_recovery(pos);
447 spin_lock(&nn->nfs_client_lock); 459 status = nfs4_wait_clnt_recover(pos);
448 continue;
449 } 460 }
450 status = pos->cl_cons_state;
451 spin_lock(&nn->nfs_client_lock); 461 spin_lock(&nn->nfs_client_lock);
452 if (status < 0) 462 if (status < 0)
453 continue; 463 continue;
454 } 464 }
465 if (pos->cl_cons_state != NFS_CS_READY)
466 continue;
455 467
456 if (pos->rpc_ops != new->rpc_ops) 468 if (pos->rpc_ops != new->rpc_ops)
457 continue; 469 continue;
@@ -469,17 +481,18 @@ int nfs41_walk_client_list(struct nfs_client *new,
469 continue; 481 continue;
470 482
471 atomic_inc(&pos->cl_count); 483 atomic_inc(&pos->cl_count);
472 spin_unlock(&nn->nfs_client_lock); 484 *result = pos;
485 status = 0;
473 dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", 486 dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
474 __func__, pos, atomic_read(&pos->cl_count)); 487 __func__, pos, atomic_read(&pos->cl_count));
475 488 break;
476 *result = pos;
477 return 0;
478 } 489 }
479 490
480 /* No matching nfs_client found. */ 491 /* No matching nfs_client found. */
481 spin_unlock(&nn->nfs_client_lock); 492 spin_unlock(&nn->nfs_client_lock);
482 dprintk("NFS: <-- %s status = %d\n", __func__, status); 493 dprintk("NFS: <-- %s status = %d\n", __func__, status);
494 if (prev)
495 nfs_put_client(prev);
483 return status; 496 return status;
484} 497}
485#endif /* CONFIG_NFS_V4_1 */ 498#endif /* CONFIG_NFS_V4_1 */
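
Both client-list walks above switch from skipping initialising entries to waiting for them: pin the entry with a reference, drop the list lock, release the reference held on the previous candidate, sleep until initialisation finishes, then retake the lock and re-check. The single-threaded toy below shows only that ordering; the structs and helpers are illustrative, and wait_ready() simply flips a flag where the kernel actually sleeps:

    #include <stdio.h>

    /* Toy reference-counted client record; names are illustrative only. */
    struct client {
        const char *name;
        int refcount;
        int ready;          /* 0: still initialising, 1: usable */
    };

    static void get_client(struct client *c) { c->refcount++; }

    static void put_client(struct client *c)
    {
        if (--c->refcount == 0)
            printf("  %s freed\n", c->name);
    }

    static void lock_list(void)   { printf("  lock list\n"); }
    static void unlock_list(void) { printf("  unlock list\n"); }

    static void wait_ready(struct client *c)
    {
        printf("  wait for %s to finish initialising\n", c->name);
        c->ready = 1;       /* the kernel sleeps here; the toy just flips */
    }

    int main(void)
    {
        struct client a = { "clientA", 1, 0 };
        struct client b = { "clientB", 1, 1 };
        struct client *list[] = { &a, &b };
        struct client *prev = NULL;

        lock_list();
        for (unsigned i = 0; i < 2; i++) {
            struct client *pos = list[i];

            if (!pos->ready) {
                /* Pin "pos" so it cannot go away, drop the lock to sleep,
                 * and let go of the previous candidate's reference. */
                get_client(pos);
                unlock_list();
                if (prev)
                    put_client(prev);
                prev = pos;
                wait_ready(pos);
                lock_list();
            }
            if (!pos->ready)
                continue;
            printf("  %s is a usable match candidate\n", pos->name);
        }
        unlock_list();
        if (prev)
            put_client(prev);
        return 0;
    }
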
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 49eeb044c109..4fb234d3aefb 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -129,7 +129,6 @@ static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo)
129{ 129{
130 if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) 130 if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
131 return; 131 return;
132 clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags);
133 pnfs_return_layout(inode); 132 pnfs_return_layout(inode);
134} 133}
135 134
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b2671cb0f901..0ad025eb523b 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1046,6 +1046,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1046 /* Save the delegation */ 1046 /* Save the delegation */
1047 nfs4_stateid_copy(&stateid, &delegation->stateid); 1047 nfs4_stateid_copy(&stateid, &delegation->stateid);
1048 rcu_read_unlock(); 1048 rcu_read_unlock();
1049 nfs_release_seqid(opendata->o_arg.seqid);
1049 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 1050 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1050 if (ret != 0) 1051 if (ret != 0)
1051 goto out; 1052 goto out;
@@ -2632,7 +2633,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2632 int status; 2633 int status;
2633 2634
2634 if (pnfs_ld_layoutret_on_setattr(inode)) 2635 if (pnfs_ld_layoutret_on_setattr(inode))
2635 pnfs_return_layout(inode); 2636 pnfs_commit_and_return_layout(inode);
2636 2637
2637 nfs_fattr_init(fattr); 2638 nfs_fattr_init(fattr);
2638 2639
@@ -6416,22 +6417,8 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
6416static void nfs4_layoutcommit_release(void *calldata) 6417static void nfs4_layoutcommit_release(void *calldata)
6417{ 6418{
6418 struct nfs4_layoutcommit_data *data = calldata; 6419 struct nfs4_layoutcommit_data *data = calldata;
6419 struct pnfs_layout_segment *lseg, *tmp;
6420 unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
6421 6420
6422 pnfs_cleanup_layoutcommit(data); 6421 pnfs_cleanup_layoutcommit(data);
6423 /* Matched by references in pnfs_set_layoutcommit */
6424 list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
6425 list_del_init(&lseg->pls_lc_list);
6426 if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
6427 &lseg->pls_flags))
6428 pnfs_put_lseg(lseg);
6429 }
6430
6431 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
6432 smp_mb__after_clear_bit();
6433 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
6434
6435 put_rpccred(data->cred); 6422 put_rpccred(data->cred);
6436 kfree(data); 6423 kfree(data);
6437} 6424}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 6ace365c6334..d41a3518509f 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1886,7 +1886,13 @@ again:
1886 status = PTR_ERR(clnt); 1886 status = PTR_ERR(clnt);
1887 break; 1887 break;
1888 } 1888 }
1889 clp->cl_rpcclient = clnt; 1889 /* Note: this is safe because we haven't yet marked the
1890 * client as ready, so we are the only user of
1891 * clp->cl_rpcclient
1892 */
1893 clnt = xchg(&clp->cl_rpcclient, clnt);
1894 rpc_shutdown_client(clnt);
1895 clnt = clp->cl_rpcclient;
1890 goto again; 1896 goto again;
1891 1897
1892 case -NFS4ERR_MINOR_VERS_MISMATCH: 1898 case -NFS4ERR_MINOR_VERS_MISMATCH:
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 48ac5aad6258..4bdffe0ba025 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -417,6 +417,16 @@ should_free_lseg(struct pnfs_layout_range *lseg_range,
417 lo_seg_intersecting(lseg_range, recall_range); 417 lo_seg_intersecting(lseg_range, recall_range);
418} 418}
419 419
420static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
421 struct list_head *tmp_list)
422{
423 if (!atomic_dec_and_test(&lseg->pls_refcount))
424 return false;
425 pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
426 list_add(&lseg->pls_list, tmp_list);
427 return true;
428}
429
420/* Returns 1 if lseg is removed from list, 0 otherwise */ 430/* Returns 1 if lseg is removed from list, 0 otherwise */
421static int mark_lseg_invalid(struct pnfs_layout_segment *lseg, 431static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
422 struct list_head *tmp_list) 432 struct list_head *tmp_list)
@@ -430,11 +440,8 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
430 */ 440 */
431 dprintk("%s: lseg %p ref %d\n", __func__, lseg, 441 dprintk("%s: lseg %p ref %d\n", __func__, lseg,
432 atomic_read(&lseg->pls_refcount)); 442 atomic_read(&lseg->pls_refcount));
433 if (atomic_dec_and_test(&lseg->pls_refcount)) { 443 if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
434 pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
435 list_add(&lseg->pls_list, tmp_list);
436 rv = 1; 444 rv = 1;
437 }
438 } 445 }
439 return rv; 446 return rv;
440} 447}
@@ -777,6 +784,21 @@ send_layoutget(struct pnfs_layout_hdr *lo,
777 return lseg; 784 return lseg;
778} 785}
779 786
787static void pnfs_clear_layoutcommit(struct inode *inode,
788 struct list_head *head)
789{
790 struct nfs_inode *nfsi = NFS_I(inode);
791 struct pnfs_layout_segment *lseg, *tmp;
792
793 if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
794 return;
795 list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
796 if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
797 continue;
798 pnfs_lseg_dec_and_remove_zero(lseg, head);
799 }
800}
801
780/* 802/*
781 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr 803 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
782 * when the layout segment list is empty. 804 * when the layout segment list is empty.
@@ -808,6 +830,7 @@ _pnfs_return_layout(struct inode *ino)
808 /* Reference matched in nfs4_layoutreturn_release */ 830 /* Reference matched in nfs4_layoutreturn_release */
809 pnfs_get_layout_hdr(lo); 831 pnfs_get_layout_hdr(lo);
810 empty = list_empty(&lo->plh_segs); 832 empty = list_empty(&lo->plh_segs);
833 pnfs_clear_layoutcommit(ino, &tmp_list);
811 pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL); 834 pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
812 /* Don't send a LAYOUTRETURN if list was initially empty */ 835 /* Don't send a LAYOUTRETURN if list was initially empty */
813 if (empty) { 836 if (empty) {
@@ -820,8 +843,6 @@ _pnfs_return_layout(struct inode *ino)
820 spin_unlock(&ino->i_lock); 843 spin_unlock(&ino->i_lock);
821 pnfs_free_lseg_list(&tmp_list); 844 pnfs_free_lseg_list(&tmp_list);
822 845
823 WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));
824
825 lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); 846 lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
826 if (unlikely(lrp == NULL)) { 847 if (unlikely(lrp == NULL)) {
827 status = -ENOMEM; 848 status = -ENOMEM;
@@ -845,6 +866,33 @@ out:
845} 866}
846EXPORT_SYMBOL_GPL(_pnfs_return_layout); 867EXPORT_SYMBOL_GPL(_pnfs_return_layout);
847 868
869int
870pnfs_commit_and_return_layout(struct inode *inode)
871{
872 struct pnfs_layout_hdr *lo;
873 int ret;
874
875 spin_lock(&inode->i_lock);
876 lo = NFS_I(inode)->layout;
877 if (lo == NULL) {
878 spin_unlock(&inode->i_lock);
879 return 0;
880 }
881 pnfs_get_layout_hdr(lo);
882 /* Block new layoutgets and read/write to ds */
883 lo->plh_block_lgets++;
884 spin_unlock(&inode->i_lock);
885 filemap_fdatawait(inode->i_mapping);
886 ret = pnfs_layoutcommit_inode(inode, true);
887 if (ret == 0)
888 ret = _pnfs_return_layout(inode);
889 spin_lock(&inode->i_lock);
890 lo->plh_block_lgets--;
891 spin_unlock(&inode->i_lock);
892 pnfs_put_layout_hdr(lo);
893 return ret;
894}
895
848bool pnfs_roc(struct inode *ino) 896bool pnfs_roc(struct inode *ino)
849{ 897{
850 struct pnfs_layout_hdr *lo; 898 struct pnfs_layout_hdr *lo;
@@ -1458,7 +1506,6 @@ static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
1458 dprintk("pnfs write error = %d\n", hdr->pnfs_error); 1506 dprintk("pnfs write error = %d\n", hdr->pnfs_error);
1459 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & 1507 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1460 PNFS_LAYOUTRET_ON_ERROR) { 1508 PNFS_LAYOUTRET_ON_ERROR) {
1461 clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
1462 pnfs_return_layout(hdr->inode); 1509 pnfs_return_layout(hdr->inode);
1463 } 1510 }
1464 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) 1511 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
@@ -1613,7 +1660,6 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
1613 dprintk("pnfs read error = %d\n", hdr->pnfs_error); 1660 dprintk("pnfs read error = %d\n", hdr->pnfs_error);
1614 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & 1661 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1615 PNFS_LAYOUTRET_ON_ERROR) { 1662 PNFS_LAYOUTRET_ON_ERROR) {
1616 clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
1617 pnfs_return_layout(hdr->inode); 1663 pnfs_return_layout(hdr->inode);
1618 } 1664 }
1619 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) 1665 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
@@ -1746,11 +1792,27 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
1746 1792
1747 list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) { 1793 list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
1748 if (lseg->pls_range.iomode == IOMODE_RW && 1794 if (lseg->pls_range.iomode == IOMODE_RW &&
1749 test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) 1795 test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
1750 list_add(&lseg->pls_lc_list, listp); 1796 list_add(&lseg->pls_lc_list, listp);
1751 } 1797 }
1752} 1798}
1753 1799
1800static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
1801{
1802 struct pnfs_layout_segment *lseg, *tmp;
1803 unsigned long *bitlock = &NFS_I(inode)->flags;
1804
1805 /* Matched by references in pnfs_set_layoutcommit */
1806 list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
1807 list_del_init(&lseg->pls_lc_list);
1808 pnfs_put_lseg(lseg);
1809 }
1810
1811 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
1812 smp_mb__after_clear_bit();
1813 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
1814}
1815
1754void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg) 1816void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
1755{ 1817{
1756 pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode); 1818 pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
@@ -1795,6 +1857,7 @@ void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
1795 1857
1796 if (nfss->pnfs_curr_ld->cleanup_layoutcommit) 1858 if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
1797 nfss->pnfs_curr_ld->cleanup_layoutcommit(data); 1859 nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
1860 pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
1798} 1861}
1799 1862
1800/* 1863/*
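
The new pnfs_commit_and_return_layout() used from nfs4_proc_setattr() follows a fixed order: block further layoutgets, wait for dirty data, send LAYOUTCOMMIT, and only then LAYOUTRETURN, so the return never covers data that was not committed. A sketch of just that ordering, with printf stubs standing in for the real pnfs and VFS calls:

    #include <stdio.h>

    /* Printf stubs in place of the real calls; the point is the ordering. */
    static void lock_inode(void)         { printf("lock inode\n"); }
    static void unlock_inode(void)       { printf("unlock inode\n"); }
    static void wait_for_writeback(void) { printf("wait for dirty pages\n"); }
    static int  layoutcommit(void)       { printf("LAYOUTCOMMIT\n"); return 0; }
    static int  layoutreturn(void)       { printf("LAYOUTRETURN\n"); return 0; }

    static int block_lgets;              /* stand-in for lo->plh_block_lgets */

    static int commit_and_return_layout(void)
    {
        int ret;

        lock_inode();
        block_lgets++;                   /* no new layout segments from here */
        unlock_inode();

        wait_for_writeback();            /* everything the commit must cover */
        ret = layoutcommit();
        if (ret == 0)
            ret = layoutreturn();        /* only return what was committed */

        lock_inode();
        block_lgets--;
        unlock_inode();
        return ret;
    }

    int main(void)
    {
        return commit_and_return_layout();
    }
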
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 94ba80417748..f5f8a470a647 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -219,6 +219,7 @@ void pnfs_set_layoutcommit(struct nfs_write_data *wdata);
219void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data); 219void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
220int pnfs_layoutcommit_inode(struct inode *inode, bool sync); 220int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
221int _pnfs_return_layout(struct inode *); 221int _pnfs_return_layout(struct inode *);
222int pnfs_commit_and_return_layout(struct inode *);
222void pnfs_ld_write_done(struct nfs_write_data *); 223void pnfs_ld_write_done(struct nfs_write_data *);
223void pnfs_ld_read_done(struct nfs_read_data *); 224void pnfs_ld_read_done(struct nfs_read_data *);
224struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino, 225struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
@@ -407,6 +408,11 @@ static inline int pnfs_return_layout(struct inode *ino)
407 return 0; 408 return 0;
408} 409}
409 410
411static inline int pnfs_commit_and_return_layout(struct inode *inode)
412{
413 return 0;
414}
415
410static inline bool 416static inline bool
411pnfs_ld_layoutret_on_setattr(struct inode *inode) 417pnfs_ld_layoutret_on_setattr(struct inode *inode)
412{ 418{
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 01168865dd37..a2720071f282 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -264,7 +264,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
264 iattr->ia_valid |= ATTR_SIZE; 264 iattr->ia_valid |= ATTR_SIZE;
265 } 265 }
266 if (bmval[0] & FATTR4_WORD0_ACL) { 266 if (bmval[0] & FATTR4_WORD0_ACL) {
267 int nace; 267 u32 nace;
268 struct nfs4_ace *ace; 268 struct nfs4_ace *ace;
269 269
270 READ_BUF(4); len += 4; 270 READ_BUF(4); len += 4;
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 62c1ee128aeb..ca05f6dc3544 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -102,7 +102,8 @@ nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
102{ 102{
103 if (rp->c_type == RC_REPLBUFF) 103 if (rp->c_type == RC_REPLBUFF)
104 kfree(rp->c_replvec.iov_base); 104 kfree(rp->c_replvec.iov_base);
105 hlist_del(&rp->c_hash); 105 if (!hlist_unhashed(&rp->c_hash))
106 hlist_del(&rp->c_hash);
106 list_del(&rp->c_lru); 107 list_del(&rp->c_lru);
107 --num_drc_entries; 108 --num_drc_entries;
108 kmem_cache_free(drc_slab, rp); 109 kmem_cache_free(drc_slab, rp);
@@ -118,6 +119,10 @@ nfsd_reply_cache_free(struct svc_cacherep *rp)
118 119
119int nfsd_reply_cache_init(void) 120int nfsd_reply_cache_init(void)
120{ 121{
122 INIT_LIST_HEAD(&lru_head);
123 max_drc_entries = nfsd_cache_size_limit();
124 num_drc_entries = 0;
125
121 register_shrinker(&nfsd_reply_cache_shrinker); 126 register_shrinker(&nfsd_reply_cache_shrinker);
122 drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep), 127 drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
123 0, 0, NULL); 128 0, 0, NULL);
@@ -128,10 +133,6 @@ int nfsd_reply_cache_init(void)
128 if (!cache_hash) 133 if (!cache_hash)
129 goto out_nomem; 134 goto out_nomem;
130 135
131 INIT_LIST_HEAD(&lru_head);
132 max_drc_entries = nfsd_cache_size_limit();
133 num_drc_entries = 0;
134
135 return 0; 136 return 0;
136out_nomem: 137out_nomem:
137 printk(KERN_ERR "nfsd: failed to allocate reply cache\n"); 138 printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 2a7eb536de0b..2b2e2396a869 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1013,6 +1013,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
1013 int host_err; 1013 int host_err;
1014 int stable = *stablep; 1014 int stable = *stablep;
1015 int use_wgather; 1015 int use_wgather;
1016 loff_t pos = offset;
1016 1017
1017 dentry = file->f_path.dentry; 1018 dentry = file->f_path.dentry;
1018 inode = dentry->d_inode; 1019 inode = dentry->d_inode;
@@ -1025,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
1025 1026
1026 /* Write the data. */ 1027 /* Write the data. */
1027 oldfs = get_fs(); set_fs(KERNEL_DS); 1028 oldfs = get_fs(); set_fs(KERNEL_DS);
1028 host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset); 1029 host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
1029 set_fs(oldfs); 1030 set_fs(oldfs);
1030 if (host_err < 0) 1031 if (host_err < 0)
1031 goto out_nfserr; 1032 goto out_nfserr;
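
nfsd_vfs_write() now hands vfs_writev() a local copy of the offset. The callee advances the position it is given, so writing through a scratch variable keeps the original request offset intact for the code that runs afterwards. A tiny demo of the same pattern; loff_t_demo and do_write() are illustrative stand-ins, not kernel definitions:

    #include <stdio.h>

    typedef long long loff_t_demo;       /* illustrative stand-in for loff_t */

    /* A writev-like helper: consumes bytes and advances *pos, just as
     * vfs_writev() advances the offset it is handed. */
    static long do_write(long count, loff_t_demo *pos)
    {
        *pos += count;
        return count;
    }

    int main(void)
    {
        loff_t_demo offset = 4096;        /* offset the caller asked for */
        loff_t_demo pos = offset;         /* scratch copy for the write  */

        long written = do_write(512, &pos);

        /* "offset" is still the start of the request, which later code
         * (stable-write handling, logging) can keep relying on. */
        printf("wrote %ld at offset %lld, file position now %lld\n",
               written, offset, pos);
        return 0;
    }
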
diff --git a/fs/pnode.c b/fs/pnode.c
index 3e000a51ac0d..8b29d2164da6 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -9,6 +9,7 @@
9#include <linux/mnt_namespace.h> 9#include <linux/mnt_namespace.h>
10#include <linux/mount.h> 10#include <linux/mount.h>
11#include <linux/fs.h> 11#include <linux/fs.h>
12#include <linux/nsproxy.h>
12#include "internal.h" 13#include "internal.h"
13#include "pnode.h" 14#include "pnode.h"
14 15
@@ -220,6 +221,7 @@ static struct mount *get_source(struct mount *dest,
220int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry, 221int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
221 struct mount *source_mnt, struct list_head *tree_list) 222 struct mount *source_mnt, struct list_head *tree_list)
222{ 223{
224 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
223 struct mount *m, *child; 225 struct mount *m, *child;
224 int ret = 0; 226 int ret = 0;
225 struct mount *prev_dest_mnt = dest_mnt; 227 struct mount *prev_dest_mnt = dest_mnt;
@@ -237,6 +239,10 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
237 239
238 source = get_source(m, prev_dest_mnt, prev_src_mnt, &type); 240 source = get_source(m, prev_dest_mnt, prev_src_mnt, &type);
239 241
242 /* Notice when we are propagating across user namespaces */
243 if (m->mnt_ns->user_ns != user_ns)
244 type |= CL_UNPRIVILEGED;
245
240 child = copy_tree(source, source->mnt.mnt_root, type); 246 child = copy_tree(source, source->mnt.mnt_root, type);
241 if (IS_ERR(child)) { 247 if (IS_ERR(child)) {
242 ret = PTR_ERR(child); 248 ret = PTR_ERR(child);
diff --git a/fs/pnode.h b/fs/pnode.h
index 19b853a3445c..a0493d5ebfbf 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -23,6 +23,7 @@
23#define CL_MAKE_SHARED 0x08 23#define CL_MAKE_SHARED 0x08
24#define CL_PRIVATE 0x10 24#define CL_PRIVATE 0x10
25#define CL_SHARED_TO_SLAVE 0x20 25#define CL_SHARED_TO_SLAVE 0x20
26#define CL_UNPRIVILEGED 0x40
26 27
27static inline void set_mnt_shared(struct mount *mnt) 28static inline void set_mnt_shared(struct mount *mnt)
28{ 29{
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 4b3b3ffb52f1..21e1a8f1659d 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -755,37 +755,8 @@ void pde_put(struct proc_dir_entry *pde)
755 free_proc_entry(pde); 755 free_proc_entry(pde);
756} 756}
757 757
758/* 758static void entry_rundown(struct proc_dir_entry *de)
759 * Remove a /proc entry and free it if it's not currently in use.
760 */
761void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
762{ 759{
763 struct proc_dir_entry **p;
764 struct proc_dir_entry *de = NULL;
765 const char *fn = name;
766 unsigned int len;
767
768 spin_lock(&proc_subdir_lock);
769 if (__xlate_proc_name(name, &parent, &fn) != 0) {
770 spin_unlock(&proc_subdir_lock);
771 return;
772 }
773 len = strlen(fn);
774
775 for (p = &parent->subdir; *p; p=&(*p)->next ) {
776 if (proc_match(len, fn, *p)) {
777 de = *p;
778 *p = de->next;
779 de->next = NULL;
780 break;
781 }
782 }
783 spin_unlock(&proc_subdir_lock);
784 if (!de) {
785 WARN(1, "name '%s'\n", name);
786 return;
787 }
788
789 spin_lock(&de->pde_unload_lock); 760 spin_lock(&de->pde_unload_lock);
790 /* 761 /*
791 * Stop accepting new callers into module. If you're 762 * Stop accepting new callers into module. If you're
@@ -817,6 +788,40 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
817 spin_lock(&de->pde_unload_lock); 788 spin_lock(&de->pde_unload_lock);
818 } 789 }
819 spin_unlock(&de->pde_unload_lock); 790 spin_unlock(&de->pde_unload_lock);
791}
792
793/*
794 * Remove a /proc entry and free it if it's not currently in use.
795 */
796void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
797{
798 struct proc_dir_entry **p;
799 struct proc_dir_entry *de = NULL;
800 const char *fn = name;
801 unsigned int len;
802
803 spin_lock(&proc_subdir_lock);
804 if (__xlate_proc_name(name, &parent, &fn) != 0) {
805 spin_unlock(&proc_subdir_lock);
806 return;
807 }
808 len = strlen(fn);
809
810 for (p = &parent->subdir; *p; p=&(*p)->next ) {
811 if (proc_match(len, fn, *p)) {
812 de = *p;
813 *p = de->next;
814 de->next = NULL;
815 break;
816 }
817 }
818 spin_unlock(&proc_subdir_lock);
819 if (!de) {
820 WARN(1, "name '%s'\n", name);
821 return;
822 }
823
824 entry_rundown(de);
820 825
821 if (S_ISDIR(de->mode)) 826 if (S_ISDIR(de->mode))
822 parent->nlink--; 827 parent->nlink--;
@@ -827,3 +832,57 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
827 pde_put(de); 832 pde_put(de);
828} 833}
829EXPORT_SYMBOL(remove_proc_entry); 834EXPORT_SYMBOL(remove_proc_entry);
835
836int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
837{
838 struct proc_dir_entry **p;
839 struct proc_dir_entry *root = NULL, *de, *next;
840 const char *fn = name;
841 unsigned int len;
842
843 spin_lock(&proc_subdir_lock);
844 if (__xlate_proc_name(name, &parent, &fn) != 0) {
845 spin_unlock(&proc_subdir_lock);
846 return -ENOENT;
847 }
848 len = strlen(fn);
849
850 for (p = &parent->subdir; *p; p=&(*p)->next ) {
851 if (proc_match(len, fn, *p)) {
852 root = *p;
853 *p = root->next;
854 root->next = NULL;
855 break;
856 }
857 }
858 if (!root) {
859 spin_unlock(&proc_subdir_lock);
860 return -ENOENT;
861 }
862 de = root;
863 while (1) {
864 next = de->subdir;
865 if (next) {
866 de->subdir = next->next;
867 next->next = NULL;
868 de = next;
869 continue;
870 }
871 spin_unlock(&proc_subdir_lock);
872
873 entry_rundown(de);
874 next = de->parent;
875 if (S_ISDIR(de->mode))
876 next->nlink--;
877 de->nlink = 0;
878 if (de == root)
879 break;
880 pde_put(de);
881
882 spin_lock(&proc_subdir_lock);
883 de = next;
884 }
885 pde_put(root);
886 return 0;
887}
888EXPORT_SYMBOL(remove_proc_subtree);
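
remove_proc_subtree() above tears a /proc directory tree down without recursion: it pops one child at a time off the current entry's subdir list, descends until it reaches an entry with no children left, runs that entry down, then climbs back through the parent pointer. The same traversal on a toy tree; struct node is illustrative and not the real proc_dir_entry layout:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy node mirroring the subdir/next/parent links used above. */
    struct node {
        char name[16];
        struct node *subdir;     /* first child  */
        struct node *next;       /* next sibling */
        struct node *parent;
    };

    static struct node *add_child(struct node *parent, const char *name)
    {
        struct node *n = calloc(1, sizeof(*n));

        if (!n)
            exit(1);
        snprintf(n->name, sizeof(n->name), "%s", name);
        n->parent = parent;
        if (parent) {
            n->next = parent->subdir;    /* push onto parent's child list */
            parent->subdir = n;
        }
        return n;
    }

    /* Iterative, children-first teardown: pop one child at a time, descend
     * until a node has no children left, free it, then climb back up. */
    static void remove_subtree(struct node *root)
    {
        struct node *de = root, *next;

        for (;;) {
            next = de->subdir;
            if (next) {
                de->subdir = next->next;     /* detach first child   */
                next->next = NULL;
                de = next;                   /* and descend into it  */
                continue;
            }
            printf("removing %s\n", de->name);
            next = de->parent;
            if (de == root)
                break;
            free(de);
            de = next;                       /* climb back to parent */
        }
        free(root);
    }

    int main(void)
    {
        struct node *root = add_child(NULL, "root");
        struct node *a = add_child(root, "a");

        add_child(a, "a1");
        add_child(a, "a2");
        add_child(root, "b");

        remove_subtree(root);
        return 0;
    }

Children are always freed before their parent, and the stack depth stays constant no matter how deep the tree is.
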
diff --git a/fs/proc/root.c b/fs/proc/root.c
index c6e9fac26bac..9c7fab1d23f0 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -16,6 +16,7 @@
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/bitops.h> 18#include <linux/bitops.h>
19#include <linux/user_namespace.h>
19#include <linux/mount.h> 20#include <linux/mount.h>
20#include <linux/pid_namespace.h> 21#include <linux/pid_namespace.h>
21#include <linux/parser.h> 22#include <linux/parser.h>
@@ -108,6 +109,9 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
108 } else { 109 } else {
109 ns = task_active_pid_ns(current); 110 ns = task_active_pid_ns(current);
110 options = data; 111 options = data;
112
113 if (!current_user_ns()->may_mount_proc)
114 return ERR_PTR(-EPERM);
111 } 115 }
112 116
113 sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns); 117 sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns);
diff --git a/fs/read_write.c b/fs/read_write.c
index a698eff457fb..e6ddc8dceb96 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -17,6 +17,7 @@
17#include <linux/splice.h> 17#include <linux/splice.h>
18#include <linux/compat.h> 18#include <linux/compat.h>
19#include "read_write.h" 19#include "read_write.h"
20#include "internal.h"
20 21
21#include <asm/uaccess.h> 22#include <asm/uaccess.h>
22#include <asm/unistd.h> 23#include <asm/unistd.h>
@@ -417,6 +418,33 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof
417 418
418EXPORT_SYMBOL(do_sync_write); 419EXPORT_SYMBOL(do_sync_write);
419 420
421ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
422{
423 mm_segment_t old_fs;
424 const char __user *p;
425 ssize_t ret;
426
427 if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
428 return -EINVAL;
429
430 old_fs = get_fs();
431 set_fs(get_ds());
432 p = (__force const char __user *)buf;
433 if (count > MAX_RW_COUNT)
434 count = MAX_RW_COUNT;
435 if (file->f_op->write)
436 ret = file->f_op->write(file, p, count, pos);
437 else
438 ret = do_sync_write(file, p, count, pos);
439 set_fs(old_fs);
440 if (ret > 0) {
441 fsnotify_modify(file);
442 add_wchar(current, ret);
443 }
444 inc_syscw(current);
445 return ret;
446}
447
420ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) 448ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
421{ 449{
422 ssize_t ret; 450 ssize_t ret;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index c196369fe408..4cce1d9552fb 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset,
187 if (dbuf->count == ARRAY_SIZE(dbuf->dentries)) 187 if (dbuf->count == ARRAY_SIZE(dbuf->dentries))
188 return -ENOSPC; 188 return -ENOSPC;
189 189
190 if (name[0] == '.' && (name[1] == '\0' || 190 if (name[0] == '.' && (namelen < 2 ||
191 (name[1] == '.' && name[2] == '\0'))) 191 (namelen == 2 && name[1] == '.')))
192 return 0; 192 return 0;
193 193
194 dentry = lookup_one_len(name, dbuf->xadir, namelen); 194 dentry = lookup_one_len(name, dbuf->xadir, namelen);
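
The reiserfs change above stops assuming the directory-entry name is NUL-terminated and keys the "." / ".." test on namelen instead. The fixed predicate, extracted into a stand-alone check and exercised with deliberately unterminated test strings:

    #include <stdio.h>

    /* Directory-entry names arrive with an explicit length and need not be
     * NUL-terminated, so "." and ".." have to be detected by length. */
    static int is_dot_or_dotdot(const char *name, int namelen)
    {
        return name[0] == '.' &&
               (namelen < 2 || (namelen == 2 && name[1] == '.'));
    }

    int main(void)
    {
        /* Deliberately not NUL-terminated; the length is authoritative. */
        const char dot[]    = { '.' };
        const char dotdot[] = { '.', '.' };
        const char hidden[] = { '.', 'p', 'r', 'o', 'f', 'i', 'l', 'e' };

        printf(".        -> %d\n", is_dot_or_dotdot(dot, 1));      /* 1 */
        printf("..       -> %d\n", is_dot_or_dotdot(dotdot, 2));   /* 1 */
        printf(".profile -> %d\n", is_dot_or_dotdot(hidden, 8));   /* 0 */
        return 0;
    }
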
diff --git a/fs/splice.c b/fs/splice.c
index 718bd0056384..29e394e49ddd 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -31,6 +31,7 @@
31#include <linux/security.h> 31#include <linux/security.h>
32#include <linux/gfp.h> 32#include <linux/gfp.h>
33#include <linux/socket.h> 33#include <linux/socket.h>
34#include "internal.h"
34 35
35/* 36/*
36 * Attempt to steal a page from a pipe buffer. This should perhaps go into 37 * Attempt to steal a page from a pipe buffer. This should perhaps go into
@@ -1048,9 +1049,10 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
1048{ 1049{
1049 int ret; 1050 int ret;
1050 void *data; 1051 void *data;
1052 loff_t tmp = sd->pos;
1051 1053
1052 data = buf->ops->map(pipe, buf, 0); 1054 data = buf->ops->map(pipe, buf, 0);
1053 ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos); 1055 ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
1054 buf->ops->unmap(pipe, buf, data); 1056 buf->ops->unmap(pipe, buf, data);
1055 1057
1056 return ret; 1058 return ret;
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 2fbdff6be25c..e14512678c9b 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -1020,6 +1020,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
1020 ino = parent_sd->s_ino; 1020 ino = parent_sd->s_ino;
1021 if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0) 1021 if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0)
1022 filp->f_pos++; 1022 filp->f_pos++;
1023 else
1024 return 0;
1023 } 1025 }
1024 if (filp->f_pos == 1) { 1026 if (filp->f_pos == 1) {
1025 if (parent_sd->s_parent) 1027 if (parent_sd->s_parent)
@@ -1028,6 +1030,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
1028 ino = parent_sd->s_ino; 1030 ino = parent_sd->s_ino;
1029 if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0) 1031 if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0)
1030 filp->f_pos++; 1032 filp->f_pos++;
1033 else
1034 return 0;
1031 } 1035 }
1032 mutex_lock(&sysfs_mutex); 1036 mutex_lock(&sysfs_mutex);
1033 for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos); 1037 for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
@@ -1058,10 +1062,21 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
1058 return 0; 1062 return 0;
1059} 1063}
1060 1064
1065static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence)
1066{
1067 struct inode *inode = file_inode(file);
1068 loff_t ret;
1069
1070 mutex_lock(&inode->i_mutex);
1071 ret = generic_file_llseek(file, offset, whence);
1072 mutex_unlock(&inode->i_mutex);
1073
1074 return ret;
1075}
1061 1076
1062const struct file_operations sysfs_dir_operations = { 1077const struct file_operations sysfs_dir_operations = {
1063 .read = generic_read_dir, 1078 .read = generic_read_dir,
1064 .readdir = sysfs_readdir, 1079 .readdir = sysfs_readdir,
1065 .release = sysfs_dir_release, 1080 .release = sysfs_dir_release,
1066 .llseek = generic_file_llseek, 1081 .llseek = sysfs_dir_llseek,
1067}; 1082};
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 8d924b5ec733..afd83273e6ce 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/magic.h> 20#include <linux/magic.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/user_namespace.h>
22 23
23#include "sysfs.h" 24#include "sysfs.h"
24 25
@@ -111,6 +112,9 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
111 struct super_block *sb; 112 struct super_block *sb;
112 int error; 113 int error;
113 114
115 if (!(flags & MS_KERNMOUNT) && !current_user_ns()->may_mount_sysfs)
116 return ERR_PTR(-EPERM);
117
114 info = kzalloc(sizeof(*info), GFP_KERNEL); 118 info = kzalloc(sizeof(*info), GFP_KERNEL);
115 if (!info) 119 if (!info)
116 return ERR_PTR(-ENOMEM); 120 return ERR_PTR(-ENOMEM);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index ac838b844936..f21acf0ef01f 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1568,6 +1568,12 @@ static int ubifs_remount_rw(struct ubifs_info *c)
1568 c->remounting_rw = 1; 1568 c->remounting_rw = 1;
1569 c->ro_mount = 0; 1569 c->ro_mount = 0;
1570 1570
1571 if (c->space_fixup) {
1572 err = ubifs_fixup_free_space(c);
1573 if (err)
1574 return err;
1575 }
1576
1571 err = check_free_space(c); 1577 err = check_free_space(c);
1572 if (err) 1578 if (err)
1573 goto out; 1579 goto out;
@@ -1684,12 +1690,6 @@ static int ubifs_remount_rw(struct ubifs_info *c)
1684 err = dbg_check_space_info(c); 1690 err = dbg_check_space_info(c);
1685 } 1691 }
1686 1692
1687 if (c->space_fixup) {
1688 err = ubifs_fixup_free_space(c);
1689 if (err)
1690 goto out;
1691 }
1692
1693 mutex_unlock(&c->umount_mutex); 1693 mutex_unlock(&c->umount_mutex);
1694 return err; 1694 return err;
1695 1695
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 25f01d0bc149..b1b1fa6ffffe 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -99,7 +99,12 @@ struct mmu_gather {
99 unsigned int need_flush : 1, /* Did free PTEs */ 99 unsigned int need_flush : 1, /* Did free PTEs */
100 fast_mode : 1; /* No batching */ 100 fast_mode : 1; /* No batching */
101 101
102 unsigned int fullmm; 102 /* we are in the middle of an operation to clear
103 * a full mm and can make some optimizations */
104 unsigned int fullmm : 1,
105 /* we have performed an operation which
106 * requires a complete flush of the tlb */
107 need_flush_all : 1;
103 108
104 struct mmu_gather_batch *active; 109 struct mmu_gather_batch *active;
105 struct mmu_gather_batch local; 110 struct mmu_gather_batch local;
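
The mmu_gather change turns fullmm into a one-bit field and adds need_flush_all next to it, packing both into the same word as the existing flag bits. A minimal illustration of that bitfield layout; struct gather_demo is a toy, not the real mmu_gather:

    #include <stdio.h>

    /* Several one-bit state flags packed into a single unsigned int, as
     * the mmu_gather change above does for fullmm/need_flush_all. */
    struct gather_demo {
        unsigned int need_flush     : 1;
        unsigned int fast_mode      : 1;
        unsigned int fullmm         : 1;
        unsigned int need_flush_all : 1;
    };

    int main(void)
    {
        struct gather_demo tlb = { 0 };

        tlb.fullmm = 1;             /* tearing down a whole mm            */
        tlb.need_flush_all = 1;     /* something demanded a full TLB flush */

        printf("sizeof(struct gather_demo) = %zu\n", sizeof(tlb));
        printf("fullmm=%u need_flush_all=%u\n", tlb.fullmm, tlb.need_flush_all);
        return 0;
    }
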
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 8f7a3d68371a..ee0bd9524055 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -954,7 +954,7 @@ static inline int atapi_cdb_len(const u16 *dev_id)
954 } 954 }
955} 955}
956 956
957static inline bool atapi_command_packet_set(const u16 *dev_id) 957static inline int atapi_command_packet_set(const u16 *dev_id)
958{ 958{
959 return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; 959 return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f;
960} 960}
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 98503b792369..d9a4f7f40f32 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -35,6 +35,7 @@ struct cpu_vfs_cap_data {
35#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t)) 35#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
36 36
37 37
38struct file;
38struct inode; 39struct inode;
39struct dentry; 40struct dentry;
40struct user_namespace; 41struct user_namespace;
@@ -211,6 +212,7 @@ extern bool capable(int cap);
211extern bool ns_capable(struct user_namespace *ns, int cap); 212extern bool ns_capable(struct user_namespace *ns, int cap);
212extern bool nsown_capable(int cap); 213extern bool nsown_capable(int cap);
213extern bool inode_capable(const struct inode *inode, int cap); 214extern bool inode_capable(const struct inode *inode, int cap);
215extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
214 216
215/* audit system wants to get cap info from files as well */ 217/* audit system wants to get cap info from files as well */
216extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); 218extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 76a87fb57ac2..377cd8c3395e 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -141,11 +141,11 @@ typedef struct {
141} compat_sigset_t; 141} compat_sigset_t;
142 142
143struct compat_sigaction { 143struct compat_sigaction {
144#ifndef __ARCH_HAS_ODD_SIGACTION 144#ifndef __ARCH_HAS_IRIX_SIGACTION
145 compat_uptr_t sa_handler; 145 compat_uptr_t sa_handler;
146 compat_ulong_t sa_flags; 146 compat_ulong_t sa_flags;
147#else 147#else
148 compat_ulong_t sa_flags; 148 compat_uint_t sa_flags;
149 compat_uptr_t sa_handler; 149 compat_uptr_t sa_handler;
150#endif 150#endif
151#ifdef __ARCH_HAS_SA_RESTORER 151#ifdef __ARCH_HAS_SA_RESTORER
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index a975de1ff59f..3bd46f766751 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -51,7 +51,7 @@ struct task_struct;
51extern void debug_show_all_locks(void); 51extern void debug_show_all_locks(void);
52extern void debug_show_held_locks(struct task_struct *task); 52extern void debug_show_held_locks(struct task_struct *task);
53extern void debug_check_no_locks_freed(const void *from, unsigned long len); 53extern void debug_check_no_locks_freed(const void *from, unsigned long len);
54extern void debug_check_no_locks_held(void); 54extern void debug_check_no_locks_held(struct task_struct *task);
55#else 55#else
56static inline void debug_show_all_locks(void) 56static inline void debug_show_all_locks(void)
57{ 57{
@@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len)
67} 67}
68 68
69static inline void 69static inline void
70debug_check_no_locks_held(void) 70debug_check_no_locks_held(struct task_struct *task)
71{ 71{
72} 72}
73#endif 73#endif
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index e83ef39b3bea..fe8c4476f7e4 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -213,7 +213,7 @@ struct devfreq_simple_ondemand_data {
213#endif 213#endif
214 214
215#else /* !CONFIG_PM_DEVFREQ */ 215#else /* !CONFIG_PM_DEVFREQ */
216static struct devfreq *devfreq_add_device(struct device *dev, 216static inline struct devfreq *devfreq_add_device(struct device *dev,
217 struct devfreq_dev_profile *profile, 217 struct devfreq_dev_profile *profile,
218 const char *governor_name, 218 const char *governor_name,
219 void *data) 219 void *data)
@@ -221,34 +221,34 @@ static struct devfreq *devfreq_add_device(struct device *dev,
221 return NULL; 221 return NULL;
222} 222}
223 223
224static int devfreq_remove_device(struct devfreq *devfreq) 224static inline int devfreq_remove_device(struct devfreq *devfreq)
225{ 225{
226 return 0; 226 return 0;
227} 227}
228 228
229static int devfreq_suspend_device(struct devfreq *devfreq) 229static inline int devfreq_suspend_device(struct devfreq *devfreq)
230{ 230{
231 return 0; 231 return 0;
232} 232}
233 233
234static int devfreq_resume_device(struct devfreq *devfreq) 234static inline int devfreq_resume_device(struct devfreq *devfreq)
235{ 235{
236 return 0; 236 return 0;
237} 237}
238 238
239static struct opp *devfreq_recommended_opp(struct device *dev, 239static inline struct opp *devfreq_recommended_opp(struct device *dev,
240 unsigned long *freq, u32 flags) 240 unsigned long *freq, u32 flags)
241{ 241{
242 return -EINVAL; 242 return ERR_PTR(-EINVAL);
243} 243}
244 244
245static int devfreq_register_opp_notifier(struct device *dev, 245static inline int devfreq_register_opp_notifier(struct device *dev,
246 struct devfreq *devfreq) 246 struct devfreq *devfreq)
247{ 247{
248 return -EINVAL; 248 return -EINVAL;
249} 249}
250 250
251static int devfreq_unregister_opp_notifier(struct device *dev, 251static inline int devfreq_unregister_opp_notifier(struct device *dev,
252 struct devfreq *devfreq) 252 struct devfreq *devfreq)
253{ 253{
254 return -EINVAL; 254 return -EINVAL;
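
With the stubs above made static inline, devfreq_recommended_opp() also has to return ERR_PTR(-EINVAL) rather than a bare -EINVAL, because its return type is a pointer. The sketch below re-creates the error-pointer idiom in user space; err_ptr(), ptr_err() and is_err() are simplified stand-ins for the kernel's err.h helpers, not the real definitions:

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    /*
     * Simplified error-pointer helpers: small negative errno values are
     * carried inside an otherwise-invalid pointer, so a pointer-returning
     * function can still report an error code.
     */
    #define MAX_ERRNO 4095

    static inline void *err_ptr(long error)
    {
        return (void *)(intptr_t)error;
    }

    static inline long ptr_err(const void *ptr)
    {
        return (long)(intptr_t)ptr;
    }

    static inline int is_err(const void *ptr)
    {
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    /* Stubbed-out lookup in the spirit of the !CONFIG_PM_DEVFREQ variant
     * of devfreq_recommended_opp(): no support built in, so fail. */
    static void *recommended_opp(unsigned long *freq)
    {
        (void)freq;
        return err_ptr(-EINVAL);
    }

    int main(void)
    {
        unsigned long freq = 800000000UL;
        void *opp = recommended_opp(&freq);

        if (is_err(opp))
            printf("no OPP available: error %ld\n", ptr_err(opp));
        return 0;
    }
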
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 043a5cf8b5ba..e70df40d84f6 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -3,7 +3,6 @@
3#ifndef FREEZER_H_INCLUDED 3#ifndef FREEZER_H_INCLUDED
4#define FREEZER_H_INCLUDED 4#define FREEZER_H_INCLUDED
5 5
6#include <linux/debug_locks.h>
7#include <linux/sched.h> 6#include <linux/sched.h>
8#include <linux/wait.h> 7#include <linux/wait.h>
9#include <linux/atomic.h> 8#include <linux/atomic.h>
@@ -49,8 +48,6 @@ extern void thaw_kernel_threads(void);
49 48
50static inline bool try_to_freeze(void) 49static inline bool try_to_freeze(void)
51{ 50{
52 if (!(current->flags & PF_NOFREEZE))
53 debug_check_no_locks_held();
54 might_sleep(); 51 might_sleep();
55 if (likely(!freezing(current))) 52 if (likely(!freezing(current)))
56 return false; 53 return false;
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 729eded4b24f..2b93a9a5a1e6 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -50,4 +50,6 @@ static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
50 spin_unlock(&fs->lock); 50 spin_unlock(&fs->lock);
51} 51}
52 52
53extern bool current_chrooted(void);
54
53#endif /* _LINUX_FS_STRUCT_H */ 55#endif /* _LINUX_FS_STRUCT_H */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index e5ca8ef50e9b..52da2a250795 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -89,6 +89,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
89 * that the call back has its own recursion protection. If it does 89 * that the call back has its own recursion protection. If it does
90 * not set this, then the ftrace infrastructure will add recursion 90 * not set this, then the ftrace infrastructure will add recursion
91 * protection for the caller. 91 * protection for the caller.
92 * STUB - The ftrace_ops is just a place holder.
92 */ 93 */
93enum { 94enum {
94 FTRACE_OPS_FL_ENABLED = 1 << 0, 95 FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -98,6 +99,7 @@ enum {
98 FTRACE_OPS_FL_SAVE_REGS = 1 << 4, 99 FTRACE_OPS_FL_SAVE_REGS = 1 << 4,
99 FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5, 100 FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
100 FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, 101 FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
102 FTRACE_OPS_FL_STUB = 1 << 7,
101}; 103};
102 104
103struct ftrace_ops { 105struct ftrace_ops {
@@ -394,7 +396,6 @@ ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
394 size_t cnt, loff_t *ppos); 396 size_t cnt, loff_t *ppos);
395ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, 397ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
396 size_t cnt, loff_t *ppos); 398 size_t cnt, loff_t *ppos);
397loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence);
398int ftrace_regex_release(struct inode *inode, struct file *file); 399int ftrace_regex_release(struct inode *inode, struct file *file);
399 400
400void __init 401void __init
@@ -567,6 +568,8 @@ static inline int
567ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } 568ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
568#endif /* CONFIG_DYNAMIC_FTRACE */ 569#endif /* CONFIG_DYNAMIC_FTRACE */
569 570
571loff_t ftrace_filter_lseek(struct file *file, loff_t offset, int whence);
572
570/* totally disable ftrace - can not re-enable after this */ 573/* totally disable ftrace - can not re-enable after this */
571void ftrace_kill(void); 574void ftrace_kill(void);
572 575
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index cad77fe09d77..c13958251927 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -518,7 +518,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
518int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 518int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
519 void *data, unsigned long len); 519 void *data, unsigned long len);
520int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 520int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
521 gpa_t gpa); 521 gpa_t gpa, unsigned long len);
522int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); 522int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
523int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); 523int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
524struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); 524struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index fa7cc7244cbd..b0bcce0ddc95 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -71,6 +71,7 @@ struct gfn_to_hva_cache {
71 u64 generation; 71 u64 generation;
72 gpa_t gpa; 72 gpa_t gpa;
73 unsigned long hva; 73 unsigned long hva;
74 unsigned long len;
74 struct kvm_memory_slot *memslot; 75 struct kvm_memory_slot *memslot;
75}; 76};
76 77
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 91c9d109e5f1..eae7a053dc51 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -398,6 +398,7 @@ enum {
398 ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ 398 ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
399 ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ 399 ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
400 ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ 400 ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
401 ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
401 402
402 /* DMA mask for user DMA control: User visible values; DO NOT 403 /* DMA mask for user DMA control: User visible values; DO NOT
403 renumber */ 404 renumber */
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 5b18ecde69b5..1aa4f13cdfa6 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -106,6 +106,29 @@ enum max77693_muic_reg {
106 MAX77693_MUIC_REG_END, 106 MAX77693_MUIC_REG_END,
107}; 107};
108 108
109/* MAX77693 INTMASK1~2 Register */
110#define INTMASK1_ADC1K_SHIFT 3
111#define INTMASK1_ADCERR_SHIFT 2
112#define INTMASK1_ADCLOW_SHIFT 1
113#define INTMASK1_ADC_SHIFT 0
114#define INTMASK1_ADC1K_MASK (1 << INTMASK1_ADC1K_SHIFT)
115#define INTMASK1_ADCERR_MASK (1 << INTMASK1_ADCERR_SHIFT)
116#define INTMASK1_ADCLOW_MASK (1 << INTMASK1_ADCLOW_SHIFT)
117#define INTMASK1_ADC_MASK (1 << INTMASK1_ADC_SHIFT)
118
119#define INTMASK2_VIDRM_SHIFT 5
120#define INTMASK2_VBVOLT_SHIFT 4
121#define INTMASK2_DXOVP_SHIFT 3
122#define INTMASK2_DCDTMR_SHIFT 2
123#define INTMASK2_CHGDETRUN_SHIFT 1
124#define INTMASK2_CHGTYP_SHIFT 0
125#define INTMASK2_VIDRM_MASK (1 << INTMASK2_VIDRM_SHIFT)
126#define INTMASK2_VBVOLT_MASK (1 << INTMASK2_VBVOLT_SHIFT)
127#define INTMASK2_DXOVP_MASK (1 << INTMASK2_DXOVP_SHIFT)
128#define INTMASK2_DCDTMR_MASK (1 << INTMASK2_DCDTMR_SHIFT)
129#define INTMASK2_CHGDETRUN_MASK (1 << INTMASK2_CHGDETRUN_SHIFT)
130#define INTMASK2_CHGTYP_MASK (1 << INTMASK2_CHGTYP_SHIFT)
131
109/* MAX77693 MUIC - STATUS1~3 Register */ 132/* MAX77693 MUIC - STATUS1~3 Register */
110#define STATUS1_ADC_SHIFT (0) 133#define STATUS1_ADC_SHIFT (0)
111#define STATUS1_ADCLOW_SHIFT (5) 134#define STATUS1_ADCLOW_SHIFT (5)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7acc9dc73c9f..e19ff30ad0a2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -87,7 +87,6 @@ extern unsigned int kobjsize(const void *objp);
87#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ 87#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
88#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ 88#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
89 89
90#define VM_POPULATE 0x00001000
91#define VM_LOCKED 0x00002000 90#define VM_LOCKED 0x00002000
92#define VM_IO 0x00004000 /* Memory mapped I/O or similar */ 91#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
93 92
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 61c7a87e5d2b..9aa863da287f 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -79,8 +79,6 @@ calc_vm_flag_bits(unsigned long flags)
79{ 79{
80 return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | 80 return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
81 _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | 81 _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
82 ((flags & MAP_LOCKED) ? (VM_LOCKED | VM_POPULATE) : 0) | 82 _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED );
83 (((flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE) ?
84 VM_POPULATE : 0);
85} 83}
86#endif /* _LINUX_MMAN_H */ 84#endif /* _LINUX_MMAN_H */
diff --git a/include/linux/mount.h b/include/linux/mount.h
index d7029f4a191a..73005f9957ea 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -47,6 +47,8 @@ struct mnt_namespace;
47 47
48#define MNT_INTERNAL 0x4000 48#define MNT_INTERNAL 0x4000
49 49
50#define MNT_LOCK_READONLY 0x400000
51
50struct vfsmount { 52struct vfsmount {
51 struct dentry *mnt_root; /* root of the mounted tree */ 53 struct dentry *mnt_root; /* root of the mounted tree */
52 struct super_block *mnt_sb; /* pointer to superblock */ 54 struct super_block *mnt_sb; /* pointer to superblock */
diff --git a/include/linux/mxsfb.h b/include/linux/mxsfb.h
index f14943d55315..f80af8674342 100644
--- a/include/linux/mxsfb.h
+++ b/include/linux/mxsfb.h
@@ -24,8 +24,8 @@
24#define STMLCDIF_18BIT 2 /** pixel data bus to the display is of 18 bit width */ 24#define STMLCDIF_18BIT 2 /** pixel data bus to the display is of 18 bit width */
25#define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */ 25#define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */
26 26
27#define FB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6) 27#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
28#define FB_SYNC_DOTCLK_FAILING_ACT (1 << 7) /* falling/negative edge sampling */ 28#define MXSFB_SYNC_DOTCLK_FAILING_ACT (1 << 7) /* falling/negative edge sampling */
29 29
30struct mxsfb_platform_data { 30struct mxsfb_platform_data {
31 struct fb_videomode *mode_list; 31 struct fb_videomode *mode_list;
@@ -44,6 +44,9 @@ struct mxsfb_platform_data {
44 * allocated. If specified, fb_size must also be specified. 44 * allocated. If specified, fb_size must also be specified.
45 * fb_phys must be unused by Linux. 45 * fb_phys must be unused by Linux.
46 */ 46 */
47 u32 sync; /* sync mask, contains MXSFB specifics not
48 * carried in fb_info->var.sync
49 */
47}; 50};
48 51
49#endif /* __LINUX_MXSFB_H */ 52#endif /* __LINUX_MXSFB_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b3d00fa4b314..6151e903eef0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -210,9 +210,9 @@ struct netdev_hw_addr {
210#define NETDEV_HW_ADDR_T_SLAVE 3 210#define NETDEV_HW_ADDR_T_SLAVE 3
211#define NETDEV_HW_ADDR_T_UNICAST 4 211#define NETDEV_HW_ADDR_T_UNICAST 4
212#define NETDEV_HW_ADDR_T_MULTICAST 5 212#define NETDEV_HW_ADDR_T_MULTICAST 5
213 bool synced;
214 bool global_use; 213 bool global_use;
215 int refcount; 214 int refcount;
215 int synced;
216 struct rcu_head rcu_head; 216 struct rcu_head rcu_head;
217}; 217};
218 218
@@ -895,7 +895,7 @@ struct netdev_fcoe_hbainfo {
895 * 895 *
896 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) 896 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
897 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, 897 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
898 * struct net_device *dev) 898 * struct net_device *dev, u32 filter_mask)
899 * 899 *
900 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); 900 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
901 * Called to change device carrier. Soft-devices (like dummy, team, etc) 901 * Called to change device carrier. Soft-devices (like dummy, team, etc)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 2461033a7987..710067f3618c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -916,6 +916,7 @@ void pci_disable_rom(struct pci_dev *pdev);
916void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); 916void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
917void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); 917void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
918size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size); 918size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
919void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
919 920
920/* Power management related routines */ 921/* Power management related routines */
921int pci_save_state(struct pci_dev *dev); 922int pci_save_state(struct pci_dev *dev);
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 5a710b9c578e..87a03c746f17 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -93,14 +93,20 @@ do { \
93 93
94#else /* !CONFIG_PREEMPT_COUNT */ 94#else /* !CONFIG_PREEMPT_COUNT */
95 95
96#define preempt_disable() do { } while (0) 96/*
97#define sched_preempt_enable_no_resched() do { } while (0) 97 * Even if we don't have any preemption, we need preempt disable/enable
98#define preempt_enable_no_resched() do { } while (0) 98 * to be barriers, so that we don't have things like get_user/put_user
99#define preempt_enable() do { } while (0) 99 * that can cause faults and scheduling migrate into our preempt-protected
100 100 * region.
101#define preempt_disable_notrace() do { } while (0) 101 */
102#define preempt_enable_no_resched_notrace() do { } while (0) 102#define preempt_disable() barrier()
103#define preempt_enable_notrace() do { } while (0) 103#define sched_preempt_enable_no_resched() barrier()
104#define preempt_enable_no_resched() barrier()
105#define preempt_enable() barrier()
106
107#define preempt_disable_notrace() barrier()
108#define preempt_enable_no_resched_notrace() barrier()
109#define preempt_enable_notrace() barrier()
104 110
105#endif /* CONFIG_PREEMPT_COUNT */ 111#endif /* CONFIG_PREEMPT_COUNT */
106 112
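On !CONFIG_PREEMPT_COUNT builds the hunk above turns the formerly empty macros into barrier(), a compiler-only memory barrier, for exactly the reason the new comment gives. A rough userspace sketch of the same idea; barrier() is spelled out with GCC-style inline asm the way the kernel defines it, while the surrounding code is invented:

#include <stdio.h>

/* compiler barrier: emits no instructions, but forbids the compiler from
 * caching memory in registers across it or moving accesses past it */
#define barrier() __asm__ __volatile__("" ::: "memory")

#define preempt_disable()   barrier()
#define preempt_enable()    barrier()

static int counter;             /* stand-in for per-CPU data */

static void bump(void)
{
        preempt_disable();
        /* with empty macros the compiler could hoist or sink this access
         * out of the critical section; the barriers pin it inside */
        counter++;
        preempt_enable();
}

int main(void)
{
        bump();
        printf("%d\n", counter);
        return 0;
}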
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 8307f2f94d86..94dfb2aa5533 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -117,6 +117,7 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
117 const struct file_operations *proc_fops, 117 const struct file_operations *proc_fops,
118 void *data); 118 void *data);
119extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); 119extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent);
120extern int remove_proc_subtree(const char *name, struct proc_dir_entry *parent);
120 121
121struct pid_namespace; 122struct pid_namespace;
122 123
@@ -202,6 +203,7 @@ static inline struct proc_dir_entry *proc_create_data(const char *name,
202 return NULL; 203 return NULL;
203} 204}
204#define remove_proc_entry(name, parent) do {} while (0) 205#define remove_proc_entry(name, parent) do {} while (0)
206#define remove_proc_subtree(name, parent) do {} while (0)
205 207
206static inline struct proc_dir_entry *proc_symlink(const char *name, 208static inline struct proc_dir_entry *proc_symlink(const char *name,
207 struct proc_dir_entry *parent,const char *dest) {return NULL;} 209 struct proc_dir_entry *parent,const char *dest) {return NULL;}
diff --git a/include/linux/security.h b/include/linux/security.h
index eee7478cda70..032c366ef1c6 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1012,6 +1012,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1012 * This hook can be used by the module to update any security state 1012 * This hook can be used by the module to update any security state
1013 * associated with the TUN device's security structure. 1013 * associated with the TUN device's security structure.
1014 * @security pointer to the TUN device's security structure. 1014 * @security pointer to the TUN device's security structure.
1015 * @skb_owned_by:
1016 * This hook sets the packet's owning sock.
1017 * @skb is the packet.
1018 * @sk the sock which owns the packet.
1015 * 1019 *
1016 * Security hooks for XFRM operations. 1020 * Security hooks for XFRM operations.
1017 * 1021 *
@@ -1638,6 +1642,7 @@ struct security_operations {
1638 int (*tun_dev_attach_queue) (void *security); 1642 int (*tun_dev_attach_queue) (void *security);
1639 int (*tun_dev_attach) (struct sock *sk, void *security); 1643 int (*tun_dev_attach) (struct sock *sk, void *security);
1640 int (*tun_dev_open) (void *security); 1644 int (*tun_dev_open) (void *security);
1645 void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk);
1641#endif /* CONFIG_SECURITY_NETWORK */ 1646#endif /* CONFIG_SECURITY_NETWORK */
1642 1647
1643#ifdef CONFIG_SECURITY_NETWORK_XFRM 1648#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -2588,6 +2593,8 @@ int security_tun_dev_attach_queue(void *security);
2588int security_tun_dev_attach(struct sock *sk, void *security); 2593int security_tun_dev_attach(struct sock *sk, void *security);
2589int security_tun_dev_open(void *security); 2594int security_tun_dev_open(void *security);
2590 2595
2596void security_skb_owned_by(struct sk_buff *skb, struct sock *sk);
2597
2591#else /* CONFIG_SECURITY_NETWORK */ 2598#else /* CONFIG_SECURITY_NETWORK */
2592static inline int security_unix_stream_connect(struct sock *sock, 2599static inline int security_unix_stream_connect(struct sock *sock,
2593 struct sock *other, 2600 struct sock *other,
@@ -2779,6 +2786,11 @@ static inline int security_tun_dev_open(void *security)
2779{ 2786{
2780 return 0; 2787 return 0;
2781} 2788}
2789
2790static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
2791{
2792}
2793
2782#endif /* CONFIG_SECURITY_NETWORK */ 2794#endif /* CONFIG_SECURITY_NETWORK */
2783 2795
2784#ifdef CONFIG_SECURITY_NETWORK_XFRM 2796#ifdef CONFIG_SECURITY_NETWORK_XFRM
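The security.h hunks add the skb_owned_by hook together with a do-nothing static inline for !CONFIG_SECURITY_NETWORK builds, so callers never need an #ifdef. A sketch of that optional-hook-plus-stub pattern; the dispatch structure below is invented for illustration and is not the real LSM plumbing:

struct sk_buff;                 /* opaque here; illustration only */
struct sock;

#ifdef CONFIG_SECURITY_NETWORK
/* hypothetical ops table: a module may or may not install the hook */
struct sec_ops {
        void (*skb_owned_by)(struct sk_buff *skb, struct sock *sk);
};

static const struct sec_ops *sec_ops;

void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
{
        if (sec_ops && sec_ops->skb_owned_by)
                sec_ops->skb_owned_by(skb, sk);
}
#else
/* the stub keeps call sites unconditional and compiles to nothing */
static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
{
}
#endif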
diff --git a/include/linux/signal.h b/include/linux/signal.h
index a2dcb94ea49d..9475c5cb28bc 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -250,11 +250,11 @@ extern int show_unhandled_signals;
250extern int sigsuspend(sigset_t *); 250extern int sigsuspend(sigset_t *);
251 251
252struct sigaction { 252struct sigaction {
253#ifndef __ARCH_HAS_ODD_SIGACTION 253#ifndef __ARCH_HAS_IRIX_SIGACTION
254 __sighandler_t sa_handler; 254 __sighandler_t sa_handler;
255 unsigned long sa_flags; 255 unsigned long sa_flags;
256#else 256#else
257 unsigned long sa_flags; 257 unsigned int sa_flags;
258 __sighandler_t sa_handler; 258 __sighandler_t sa_handler;
259#endif 259#endif
260#ifdef __ARCH_HAS_SA_RESTORER 260#ifdef __ARCH_HAS_SA_RESTORER
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 441f5bfdab8e..b8292d8cc9fa 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2643,6 +2643,13 @@ static inline void nf_reset(struct sk_buff *skb)
2643#endif 2643#endif
2644} 2644}
2645 2645
2646static inline void nf_reset_trace(struct sk_buff *skb)
2647{
2648#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
2649 skb->nf_trace = 0;
2650#endif
2651}
2652
2646/* Note: This doesn't put any conntrack and bridge info in dst. */ 2653/* Note: This doesn't put any conntrack and bridge info in dst. */
2647static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) 2654static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2648{ 2655{
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index a26e2fb604e6..e2369c167dbd 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -16,7 +16,10 @@
16 * In the debug case, 1 means unlocked, 0 means locked. (the values 16 * In the debug case, 1 means unlocked, 0 means locked. (the values
17 * are inverted, to catch initialization bugs) 17 * are inverted, to catch initialization bugs)
18 * 18 *
19 * No atomicity anywhere, we are on UP. 19 * No atomicity anywhere, we are on UP. However, we still need
20 * the compiler barriers, because we do not want the compiler to
21 * move potentially faulting instructions (notably user accesses)
22 * into the locked sequence, resulting in non-atomic execution.
20 */ 23 */
21 24
22#ifdef CONFIG_DEBUG_SPINLOCK 25#ifdef CONFIG_DEBUG_SPINLOCK
@@ -25,6 +28,7 @@
25static inline void arch_spin_lock(arch_spinlock_t *lock) 28static inline void arch_spin_lock(arch_spinlock_t *lock)
26{ 29{
27 lock->slock = 0; 30 lock->slock = 0;
31 barrier();
28} 32}
29 33
30static inline void 34static inline void
@@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
32{ 36{
33 local_irq_save(flags); 37 local_irq_save(flags);
34 lock->slock = 0; 38 lock->slock = 0;
39 barrier();
35} 40}
36 41
37static inline int arch_spin_trylock(arch_spinlock_t *lock) 42static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
39 char oldval = lock->slock; 44 char oldval = lock->slock;
40 45
41 lock->slock = 0; 46 lock->slock = 0;
47 barrier();
42 48
43 return oldval > 0; 49 return oldval > 0;
44} 50}
45 51
46static inline void arch_spin_unlock(arch_spinlock_t *lock) 52static inline void arch_spin_unlock(arch_spinlock_t *lock)
47{ 53{
54 barrier();
48 lock->slock = 1; 55 lock->slock = 1;
49} 56}
50 57
51/* 58/*
52 * Read-write spinlocks. No debug version. 59 * Read-write spinlocks. No debug version.
53 */ 60 */
54#define arch_read_lock(lock) do { (void)(lock); } while (0) 61#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
55#define arch_write_lock(lock) do { (void)(lock); } while (0) 62#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
56#define arch_read_trylock(lock) ({ (void)(lock); 1; }) 63#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
57#define arch_write_trylock(lock) ({ (void)(lock); 1; }) 64#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
58#define arch_read_unlock(lock) do { (void)(lock); } while (0) 65#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
59#define arch_write_unlock(lock) do { (void)(lock); } while (0) 66#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
60 67
61#else /* DEBUG_SPINLOCK */ 68#else /* DEBUG_SPINLOCK */
62#define arch_spin_is_locked(lock) ((void)(lock), 0) 69#define arch_spin_is_locked(lock) ((void)(lock), 0)
63/* for sched.c and kernel_lock.c: */ 70/* for sched.c and kernel_lock.c: */
64# define arch_spin_lock(lock) do { (void)(lock); } while (0) 71# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
65# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) 72# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
66# define arch_spin_unlock(lock) do { (void)(lock); } while (0) 73# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
67# define arch_spin_trylock(lock) ({ (void)(lock); 1; }) 74# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
68#endif /* DEBUG_SPINLOCK */ 75#endif /* DEBUG_SPINLOCK */
69 76
70#define arch_spin_is_contended(lock) (((void)(lock), 0)) 77#define arch_spin_is_contended(lock) (((void)(lock), 0))
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index f0bd7f90a90d..e3c0ae9bb1fa 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -44,7 +44,7 @@
44/* Adding event notification support elements */ 44/* Adding event notification support elements */
45#define THERMAL_GENL_FAMILY_NAME "thermal_event" 45#define THERMAL_GENL_FAMILY_NAME "thermal_event"
46#define THERMAL_GENL_VERSION 0x01 46#define THERMAL_GENL_VERSION 0x01
47#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_group" 47#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp"
48 48
49/* Default Thermal Governor */ 49/* Default Thermal Governor */
50#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) 50#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 9d81de123c90..42278bbf7a88 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -68,6 +68,7 @@ struct udp_sock {
68 * For encapsulation sockets. 68 * For encapsulation sockets.
69 */ 69 */
70 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); 70 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
71 void (*encap_destroy)(struct sock *sk);
71}; 72};
72 73
73static inline struct udp_sock *udp_sk(const struct sock *sk) 74static inline struct udp_sock *udp_sk(const struct sock *sk)
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 0a78df5f6cfd..59694b5e5e90 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -357,6 +357,7 @@ struct hc_driver {
357 */ 357 */
358 int (*disable_usb3_lpm_timeout)(struct usb_hcd *, 358 int (*disable_usb3_lpm_timeout)(struct usb_hcd *,
359 struct usb_device *, enum usb3_link_state state); 359 struct usb_device *, enum usb3_link_state state);
360 int (*find_raw_port_number)(struct usb_hcd *, int);
360}; 361};
361 362
362extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); 363extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
@@ -396,6 +397,7 @@ extern int usb_hcd_is_primary_hcd(struct usb_hcd *hcd);
396extern int usb_add_hcd(struct usb_hcd *hcd, 397extern int usb_add_hcd(struct usb_hcd *hcd,
397 unsigned int irqnum, unsigned long irqflags); 398 unsigned int irqnum, unsigned long irqflags);
398extern void usb_remove_hcd(struct usb_hcd *hcd); 399extern void usb_remove_hcd(struct usb_hcd *hcd);
400extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1);
399 401
400struct platform_device; 402struct platform_device;
401extern void usb_hcd_platform_shutdown(struct platform_device *dev); 403extern void usb_hcd_platform_shutdown(struct platform_device *dev);
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 4ce009324933..b6b215f13b45 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -26,6 +26,8 @@ struct user_namespace {
26 kuid_t owner; 26 kuid_t owner;
27 kgid_t group; 27 kgid_t group;
28 unsigned int proc_inum; 28 unsigned int proc_inum;
29 bool may_mount_sysfs;
30 bool may_mount_proc;
29}; 31};
30 32
31extern struct user_namespace init_user_ns; 33extern struct user_namespace init_user_ns;
@@ -82,4 +84,6 @@ static inline void put_user_ns(struct user_namespace *ns)
82 84
83#endif 85#endif
84 86
87void update_mnt_policy(struct user_namespace *userns);
88
85#endif /* _LINUX_USER_H */ 89#endif /* _LINUX_USER_H */
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
index 80461c1ae9ef..bb8271d487b7 100644
--- a/include/net/flow_keys.h
+++ b/include/net/flow_keys.h
@@ -9,6 +9,7 @@ struct flow_keys {
9 __be32 ports; 9 __be32 ports;
10 __be16 port16[2]; 10 __be16 port16[2];
11 }; 11 };
12 u16 thoff;
12 u8 ip_proto; 13 u8 ip_proto;
13}; 14};
14 15
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 68c69d54d392..fce8e6b66d55 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -976,6 +976,7 @@ struct netns_ipvs {
976 int sysctl_sync_retries; 976 int sysctl_sync_retries;
977 int sysctl_nat_icmp_send; 977 int sysctl_nat_icmp_send;
978 int sysctl_pmtu_disc; 978 int sysctl_pmtu_disc;
979 int sysctl_backup_only;
979 980
980 /* ip_vs_lblc */ 981 /* ip_vs_lblc */
981 int sysctl_lblc_expiration; 982 int sysctl_lblc_expiration;
@@ -1067,6 +1068,12 @@ static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
1067 return ipvs->sysctl_pmtu_disc; 1068 return ipvs->sysctl_pmtu_disc;
1068} 1069}
1069 1070
1071static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
1072{
1073 return ipvs->sync_state & IP_VS_STATE_BACKUP &&
1074 ipvs->sysctl_backup_only;
1075}
1076
1070#else 1077#else
1071 1078
1072static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) 1079static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1114,6 +1121,11 @@ static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
1114 return 1; 1121 return 1;
1115} 1122}
1116 1123
1124static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
1125{
1126 return 0;
1127}
1128
1117#endif 1129#endif
1118 1130
1119/* 1131/*
diff --git a/include/net/ipip.h b/include/net/ipip.h
index fd19625ff99d..982141c15200 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -77,15 +77,11 @@ static inline void tunnel_ip_select_ident(struct sk_buff *skb,
77{ 77{
78 struct iphdr *iph = ip_hdr(skb); 78 struct iphdr *iph = ip_hdr(skb);
79 79
80 if (iph->frag_off & htons(IP_DF)) 80 /* Use inner packet iph-id if possible. */
81 iph->id = 0; 81 if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
82 else { 82 iph->id = old_iph->id;
83 /* Use inner packet iph-id if possible. */ 83 else
84 if (skb->protocol == htons(ETH_P_IP) && old_iph->id) 84 __ip_select_ident(iph, dst,
85 iph->id = old_iph->id; 85 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
86 else
87 __ip_select_ident(iph, dst,
88 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
89 }
90} 86}
91#endif 87#endif
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index cc7c19732389..714cc9a54a4c 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -130,6 +130,14 @@ struct iucv_sock {
130 enum iucv_tx_notify n); 130 enum iucv_tx_notify n);
131}; 131};
132 132
133struct iucv_skb_cb {
134 u32 class; /* target class of message */
135 u32 tag; /* tag associated with message */
136 u32 offset; /* offset for skb reception */
137};
138
139#define IUCV_SKB_CB(__skb) ((struct iucv_skb_cb *)&((__skb)->cb[0]))
140
133/* iucv socket options (SOL_IUCV) */ 141/* iucv socket options (SOL_IUCV) */
134#define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */ 142#define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */
135#define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */ 143#define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */
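IUCV_SKB_CB() reinterprets the generic 48-byte skb->cb[] scratch area as a protocol-private structure. A self-contained sketch of the pattern, with invented names standing in for sk_buff:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct pkt {                    /* stand-in for struct sk_buff */
        char cb[48];            /* per-layer scratch space */
        /* ... payload fields ... */
};

struct iucv_cb {                /* protocol-private view of cb[] */
        uint32_t class;
        uint32_t tag;
        uint32_t offset;
};

#define IUCV_CB(p) ((struct iucv_cb *)&((p)->cb[0]))

int main(void)
{
        struct pkt p;

        /* the private struct must fit in the scratch area */
        _Static_assert(sizeof(struct iucv_cb) <= sizeof(((struct pkt *)0)->cb),
                       "cb overflow");

        memset(&p, 0, sizeof(p));
        IUCV_CB(&p)->tag = 42;
        printf("tag=%u\n", (unsigned)IUCV_CB(&p)->tag);
        return 0;
}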
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 399162b50a8d..e1379b4e8faf 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -1074,7 +1074,8 @@ void fc_rport_terminate_io(struct fc_rport *);
1074/* 1074/*
1075 * DISCOVERY LAYER 1075 * DISCOVERY LAYER
1076 *****************************/ 1076 *****************************/
1077int fc_disc_init(struct fc_lport *); 1077void fc_disc_init(struct fc_lport *);
1078void fc_disc_config(struct fc_lport *, void *);
1078 1079
1079static inline struct fc_lport *fc_disc_lport(struct fc_disc *disc) 1080static inline struct fc_lport *fc_disc_lport(struct fc_disc *disc)
1080{ 1081{
diff --git a/include/sound/max98090.h b/include/sound/max98090.h
index 95efb13f8478..95efb13f8478 100755..100644
--- a/include/sound/max98090.h
+++ b/include/sound/max98090.h
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index d4c004929ae5..d4609029f014 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -488,6 +488,7 @@ struct snd_soc_dapm_path {
488 /* status */ 488 /* status */
489 u32 connect:1; /* source and sink widgets are connected */ 489 u32 connect:1; /* source and sink widgets are connected */
490 u32 walked:1; /* path has been walked */ 490 u32 walked:1; /* path has been walked */
491 u32 walking:1; /* path is in the process of being walked */
491 u32 weak:1; /* path ignored for power management */ 492 u32 weak:1; /* path ignored for power management */
492 493
493 int (*connected)(struct snd_soc_dapm_widget *source, 494 int (*connected)(struct snd_soc_dapm_widget *source,
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
index 93f5fa94a431..afafd703ad92 100644
--- a/include/uapi/linux/packet_diag.h
+++ b/include/uapi/linux/packet_diag.h
@@ -33,9 +33,11 @@ enum {
33 PACKET_DIAG_TX_RING, 33 PACKET_DIAG_TX_RING,
34 PACKET_DIAG_FANOUT, 34 PACKET_DIAG_FANOUT,
35 35
36 PACKET_DIAG_MAX, 36 __PACKET_DIAG_MAX,
37}; 37};
38 38
39#define PACKET_DIAG_MAX (__PACKET_DIAG_MAX - 1)
40
39struct packet_diag_info { 41struct packet_diag_info {
40 __u32 pdi_index; 42 __u32 pdi_index;
41 __u32 pdi_version; 43 __u32 pdi_version;
diff --git a/include/uapi/linux/unix_diag.h b/include/uapi/linux/unix_diag.h
index b8a24941db21..b9e2a6a7446f 100644
--- a/include/uapi/linux/unix_diag.h
+++ b/include/uapi/linux/unix_diag.h
@@ -39,9 +39,11 @@ enum {
39 UNIX_DIAG_MEMINFO, 39 UNIX_DIAG_MEMINFO,
40 UNIX_DIAG_SHUTDOWN, 40 UNIX_DIAG_SHUTDOWN,
41 41
42 UNIX_DIAG_MAX, 42 __UNIX_DIAG_MAX,
43}; 43};
44 44
45#define UNIX_DIAG_MAX (__UNIX_DIAG_MAX - 1)
46
45struct unix_diag_vfs { 47struct unix_diag_vfs {
46 __u32 udiag_vfs_ino; 48 __u32 udiag_vfs_ino;
47 __u32 udiag_vfs_dev; 49 __u32 udiag_vfs_dev;
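Both diag headers above switch to a __*_MAX sentinel plus a (__*_MAX - 1) define, so the exported MAX names the highest valid attribute instead of one past it. A compact sketch of how such a sentinel is normally used to size attribute tables; the names are invented and a netlink-style layout is assumed:

#include <stdio.h>

enum {
        DIAG_INFO,
        DIAG_MEMINFO,
        DIAG_SHUTDOWN,

        __DIAG_MAX,             /* sentinel: always one past the last real id */
};
#define DIAG_MAX (__DIAG_MAX - 1)

int main(void)
{
        /* attribute tables are sized MAX + 1 so every valid id indexes them */
        const char *names[DIAG_MAX + 1] = {
                [DIAG_INFO]     = "info",
                [DIAG_MEMINFO]  = "meminfo",
                [DIAG_SHUTDOWN] = "shutdown",
        };

        printf("highest attribute id: %d (%s)\n", DIAG_MAX, names[DIAG_MAX]);
        return 0;
}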
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 01c3d62436ef..ffd4652de91c 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -138,11 +138,21 @@ struct blkif_request_discard {
138 uint8_t _pad3; 138 uint8_t _pad3;
139} __attribute__((__packed__)); 139} __attribute__((__packed__));
140 140
141struct blkif_request_other {
142 uint8_t _pad1;
143 blkif_vdev_t _pad2; /* only for read/write requests */
144#ifdef CONFIG_X86_64
145 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/
146#endif
147 uint64_t id; /* private guest value, echoed in resp */
148} __attribute__((__packed__));
149
141struct blkif_request { 150struct blkif_request {
142 uint8_t operation; /* BLKIF_OP_??? */ 151 uint8_t operation; /* BLKIF_OP_??? */
143 union { 152 union {
144 struct blkif_request_rw rw; 153 struct blkif_request_rw rw;
145 struct blkif_request_discard discard; 154 struct blkif_request_discard discard;
155 struct blkif_request_other other;
146 } u; 156 } u;
147} __attribute__((__packed__)); 157} __attribute__((__packed__));
148 158
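blkif_request_other pads its head so that id lands at byte 8 of the packed request on 64-bit builds, matching the other union members. A small sketch of pinning such an ABI-sensitive layout with a compile-time check; the struct names are shortened and the 64-bit padding is hardcoded here purely for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t vdev_t;                /* stand-in for blkif_vdev_t */

struct req_other {
        uint8_t  _pad1;
        vdev_t   _pad2;
        uint32_t _pad3;                 /* 64-bit builds only in the real header */
        uint64_t id;                    /* echoed back in the response */
} __attribute__((__packed__));

struct req {
        uint8_t operation;
        union {
                struct req_other other;
                /* ... rw, discard ... */
        } u;
} __attribute__((__packed__));

/* catch ABI drift at build time instead of debugging it in a guest */
_Static_assert(offsetof(struct req, u.other.id) == 8, "id must sit at offset 8");

int main(void)
{
        printf("id offset: %zu\n", offsetof(struct req, u.other.id));
        return 0;
}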
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index 1844d31f4552..7000bb1f6e96 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -251,6 +251,12 @@ struct physdev_pci_device_add {
251 251
252#define PHYSDEVOP_pci_device_remove 26 252#define PHYSDEVOP_pci_device_remove 26
253#define PHYSDEVOP_restore_msi_ext 27 253#define PHYSDEVOP_restore_msi_ext 27
254/*
255 * Dom0 should use these two to announce that MMIO resources assigned to
256 * MSI-X capable devices won't (prepare) or may (release) change.
257 */
258#define PHYSDEVOP_prepare_msix 30
259#define PHYSDEVOP_release_msix 31
254struct physdev_pci_device { 260struct physdev_pci_device {
255 /* IN */ 261 /* IN */
256 uint16_t seg; 262 uint16_t seg;
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 3953fda2e8bd..e4e47f647446 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -330,8 +330,16 @@ static struct dentry *mqueue_mount(struct file_system_type *fs_type,
330 int flags, const char *dev_name, 330 int flags, const char *dev_name,
331 void *data) 331 void *data)
332{ 332{
333 if (!(flags & MS_KERNMOUNT)) 333 if (!(flags & MS_KERNMOUNT)) {
334 data = current->nsproxy->ipc_ns; 334 struct ipc_namespace *ns = current->nsproxy->ipc_ns;
335 /* Don't allow mounting unless the caller has CAP_SYS_ADMIN
336 * over the ipc namespace.
337 */
338 if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
339 return ERR_PTR(-EPERM);
340
341 data = ns;
342 }
335 return mount_ns(fs_type, flags, data, mqueue_fill_super); 343 return mount_ns(fs_type, flags, data, mqueue_fill_super);
336} 344}
337 345
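mqueue_mount() now refuses the mount with ERR_PTR(-EPERM) when the caller lacks CAP_SYS_ADMIN over the IPC namespace's owning user namespace. The ERR_PTR convention encodes a small negative errno in an otherwise impossible pointer value, so one return value carries either a valid object or an error. A simplified standalone rendering, mirroring the kernel's err.h helpers only in spirit:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095          /* errno values live in the top 4095 addresses */

static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *mount_it(int privileged)
{
        static int dummy_sb;

        if (!privileged)
                return ERR_PTR(-EPERM);    /* error travels in the pointer */
        return &dummy_sb;                  /* "success": any real pointer */
}

int main(void)
{
        void *sb = mount_it(0);

        if (IS_ERR(sb))
                printf("mount failed: errno %ld\n", -PTR_ERR(sb));
        return 0;
}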
diff --git a/ipc/msg.c b/ipc/msg.c
index 31cd1bf6af27..fede1d06ef30 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -872,6 +872,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
872 goto out_unlock; 872 goto out_unlock;
873 break; 873 break;
874 } 874 }
875 msg = ERR_PTR(-EAGAIN);
875 } else 876 } else
876 break; 877 break;
877 msg_counter++; 878 msg_counter++;
diff --git a/kernel/capability.c b/kernel/capability.c
index 493d97259484..f6c2ce5701e1 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -393,6 +393,30 @@ bool ns_capable(struct user_namespace *ns, int cap)
393EXPORT_SYMBOL(ns_capable); 393EXPORT_SYMBOL(ns_capable);
394 394
395/** 395/**
396 * file_ns_capable - Determine if the file's opener had a capability in effect
397 * @file: The file we want to check
398 * @ns: The usernamespace we want the capability in
399 * @cap: The capability to be tested for
400 *
401 * Return true if task that opened the file had a capability in effect
402 * when the file was opened.
403 *
404 * This does not set PF_SUPERPRIV because the caller may not
405 * actually be privileged.
406 */
407bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap)
408{
409 if (WARN_ON_ONCE(!cap_valid(cap)))
410 return false;
411
412 if (security_capable(file->f_cred, ns, cap) == 0)
413 return true;
414
415 return false;
416}
417EXPORT_SYMBOL(file_ns_capable);
418
419/**
396 * capable - Determine if the current task has a superior capability in effect 420 * capable - Determine if the current task has a superior capability in effect
397 * @cap: The capability to be tested for 421 * @cap: The capability to be tested for
398 * 422 *
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 59412d037eed..7e0962ed7f8a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4737,7 +4737,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4737 } else { 4737 } else {
4738 if (arch_vma_name(mmap_event->vma)) { 4738 if (arch_vma_name(mmap_event->vma)) {
4739 name = strncpy(tmp, arch_vma_name(mmap_event->vma), 4739 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4740 sizeof(tmp)); 4740 sizeof(tmp) - 1);
4741 tmp[sizeof(tmp) - 1] = '\0';
4741 goto got_name; 4742 goto got_name;
4742 } 4743 }
4743 4744
@@ -5986,6 +5987,7 @@ skip_type:
5986 if (pmu->pmu_cpu_context) 5987 if (pmu->pmu_cpu_context)
5987 goto got_cpu_context; 5988 goto got_cpu_context;
5988 5989
5990 ret = -ENOMEM;
5989 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); 5991 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
5990 if (!pmu->pmu_cpu_context) 5992 if (!pmu->pmu_cpu_context)
5991 goto free_dev; 5993 goto free_dev;
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index d56a64c99a8b..eb675c4d59df 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -16,7 +16,7 @@ struct ring_buffer {
16 int page_order; /* allocation order */ 16 int page_order; /* allocation order */
17#endif 17#endif
18 int nr_pages; /* nr of data pages */ 18 int nr_pages; /* nr of data pages */
19 int writable; /* are we writable */ 19 int overwrite; /* can overwrite itself */
20 20
21 atomic_t poll; /* POLL_ for wakeups */ 21 atomic_t poll; /* POLL_ for wakeups */
22 22
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 23cb34ff3973..97fddb09762b 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -18,12 +18,24 @@
18static bool perf_output_space(struct ring_buffer *rb, unsigned long tail, 18static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
19 unsigned long offset, unsigned long head) 19 unsigned long offset, unsigned long head)
20{ 20{
21 unsigned long mask; 21 unsigned long sz = perf_data_size(rb);
22 unsigned long mask = sz - 1;
22 23
23 if (!rb->writable) 24 /*
25 * check if user-writable
26 * overwrite : over-write its own tail
27 * !overwrite: buffer possibly drops events.
28 */
29 if (rb->overwrite)
24 return true; 30 return true;
25 31
26 mask = perf_data_size(rb) - 1; 32 /*
33 * verify that payload is not bigger than buffer
34 * otherwise masking logic may fail to detect
35 * the "not enough space" condition
36 */
37 if ((head - offset) > sz)
38 return false;
27 39
28 offset = (offset - tail) & mask; 40 offset = (offset - tail) & mask;
29 head = (head - tail) & mask; 41 head = (head - tail) & mask;
@@ -212,7 +224,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
212 rb->watermark = max_size / 2; 224 rb->watermark = max_size / 2;
213 225
214 if (flags & RING_BUFFER_WRITABLE) 226 if (flags & RING_BUFFER_WRITABLE)
215 rb->writable = 1; 227 rb->overwrite = 0;
228 else
229 rb->overwrite = 1;
216 230
217 atomic_set(&rb->refcount, 1); 231 atomic_set(&rb->refcount, 1);
218 232
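perf_output_space() is rebuilt around the renamed overwrite flag: an overwriting buffer always has room, and the added size check keeps the mask arithmetic from being fooled by a payload larger than the whole buffer. A rough standalone model of the check for a power-of-two buffer with the reader at tail and the writer moving from offset to head; this is an illustration, not the perf code:

#include <stdbool.h>
#include <stdio.h>

static bool output_space(unsigned long size, bool overwrite,
                         unsigned long tail, unsigned long offset,
                         unsigned long head)
{
        unsigned long mask = size - 1;          /* size must be a power of two */

        if (overwrite)
                return true;                    /* writer may lap the reader */

        if (head - offset > size)               /* payload bigger than buffer */
                return false;

        /* reduce everything to distances from the reader, modulo the size */
        offset = (offset - tail) & mask;
        head   = (head   - tail) & mask;

        /* the write must not wrap past the reader */
        return (long)(head - offset) >= 0;
}

int main(void)
{
        /* 16-byte buffer, reader at 0, writer wants to go from 10 to 20 */
        printf("%d\n", output_space(16, false, 0, 10, 20));   /* 0: would lap */
        printf("%d\n", output_space(16, false, 8, 10, 20));   /* 1: fits */
        return 0;
}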
diff --git a/kernel/exit.c b/kernel/exit.c
index 51e485ca9935..60bc027c61c3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -835,7 +835,7 @@ void do_exit(long code)
835 /* 835 /*
836 * Make sure we are holding no locks: 836 * Make sure we are holding no locks:
837 */ 837 */
838 debug_check_no_locks_held(); 838 debug_check_no_locks_held(tsk);
839 /* 839 /*
840 * We can do this unlocked here. The futex code uses this flag 840 * We can do this unlocked here. The futex code uses this flag
841 * just to verify whether the pi state cleanup has been done 841 * just to verify whether the pi state cleanup has been done
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 259db207b5d9..8a0efac4f99d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4088,7 +4088,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
4088} 4088}
4089EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); 4089EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
4090 4090
4091static void print_held_locks_bug(void) 4091static void print_held_locks_bug(struct task_struct *curr)
4092{ 4092{
4093 if (!debug_locks_off()) 4093 if (!debug_locks_off())
4094 return; 4094 return;
@@ -4097,21 +4097,22 @@ static void print_held_locks_bug(void)
4097 4097
4098 printk("\n"); 4098 printk("\n");
4099 printk("=====================================\n"); 4099 printk("=====================================\n");
4100 printk("[ BUG: %s/%d still has locks held! ]\n", 4100 printk("[ BUG: lock held at task exit time! ]\n");
4101 current->comm, task_pid_nr(current));
4102 print_kernel_ident(); 4101 print_kernel_ident();
4103 printk("-------------------------------------\n"); 4102 printk("-------------------------------------\n");
4104 lockdep_print_held_locks(current); 4103 printk("%s/%d is exiting with locks still held!\n",
4104 curr->comm, task_pid_nr(curr));
4105 lockdep_print_held_locks(curr);
4106
4105 printk("\nstack backtrace:\n"); 4107 printk("\nstack backtrace:\n");
4106 dump_stack(); 4108 dump_stack();
4107} 4109}
4108 4110
4109void debug_check_no_locks_held(void) 4111void debug_check_no_locks_held(struct task_struct *task)
4110{ 4112{
4111 if (unlikely(current->lockdep_depth > 0)) 4113 if (unlikely(task->lockdep_depth > 0))
4112 print_held_locks_bug(); 4114 print_held_locks_bug(task);
4113} 4115}
4114EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
4115 4116
4116void debug_show_all_locks(void) 4117void debug_show_all_locks(void)
4117{ 4118{
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index c1c3dc1c6023..bea15bdf82b0 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -181,6 +181,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
181 int nr; 181 int nr;
182 int rc; 182 int rc;
183 struct task_struct *task, *me = current; 183 struct task_struct *task, *me = current;
184 int init_pids = thread_group_leader(me) ? 1 : 2;
184 185
185 /* Don't allow any more processes into the pid namespace */ 186 /* Don't allow any more processes into the pid namespace */
186 disable_pid_allocation(pid_ns); 187 disable_pid_allocation(pid_ns);
@@ -230,7 +231,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
230 */ 231 */
231 for (;;) { 232 for (;;) {
232 set_current_state(TASK_UNINTERRUPTIBLE); 233 set_current_state(TASK_UNINTERRUPTIBLE);
233 if (pid_ns->nr_hashed == 1) 234 if (pid_ns->nr_hashed == init_pids)
234 break; 235 break;
235 schedule(); 236 schedule();
236 } 237 }
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c685e31492df..c3ae1446461c 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -176,10 +176,36 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
176 u64 this_clock, remote_clock; 176 u64 this_clock, remote_clock;
177 u64 *ptr, old_val, val; 177 u64 *ptr, old_val, val;
178 178
179#if BITS_PER_LONG != 64
180again:
181 /*
182 * Careful here: The local and the remote clock values need to
183 * be read out atomically as we need to compare the values and
184 * then update either the local or the remote side. So the
185 * cmpxchg64 below only protects one readout.
186 *
187 * We must reread via sched_clock_local() in the retry case on
188 * 32bit as an NMI could use sched_clock_local() via the
189 * tracer and hit between the readout of
190 * the low32bit and the high 32bit portion.
191 */
192 this_clock = sched_clock_local(my_scd);
193 /*
194 * We must enforce atomic readout on 32bit, otherwise the
195 * update on the remote cpu can hit in between the readout of
196 * the low32bit and the high 32bit portion.
197 */
198 remote_clock = cmpxchg64(&scd->clock, 0, 0);
199#else
200 /*
201 * On 64bit the read of [my]scd->clock is atomic versus the
202 * update, so we can avoid the above 32bit dance.
203 */
179 sched_clock_local(my_scd); 204 sched_clock_local(my_scd);
180again: 205again:
181 this_clock = my_scd->clock; 206 this_clock = my_scd->clock;
182 remote_clock = scd->clock; 207 remote_clock = scd->clock;
208#endif
183 209
184 /* 210 /*
185 * Use the opportunity that we have both locks 211 * Use the opportunity that we have both locks
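On 32-bit, sched_clock_remote() now reads the remote clock with cmpxchg64(&scd->clock, 0, 0): swapping 0 for 0 never changes the contents, but the operation always returns them as one atomic 64-bit access, which a plain load cannot guarantee there. A userspace sketch of the same trick using a GCC builtin, for illustration only; on 64-bit an aligned load is already atomic, as the #else branch notes:

#include <stdint.h>
#include <stdio.h>

static uint64_t remote_clock;

/* atomically snapshot a 64-bit value even on a 32-bit target:
 * a compare-and-swap of 0 for 0 leaves the contents untouched
 * but always returns them in one atomic operation */
static uint64_t read_u64_atomic(uint64_t *p)
{
        return __sync_val_compare_and_swap(p, 0, 0);
}

int main(void)
{
        remote_clock = 123456789ULL;
        printf("%llu\n", (unsigned long long)read_u64_atomic(&remote_clock));
        return 0;
}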
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7f12624a393c..67d04651f44b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1498,8 +1498,10 @@ static void try_to_wake_up_local(struct task_struct *p)
1498{ 1498{
1499 struct rq *rq = task_rq(p); 1499 struct rq *rq = task_rq(p);
1500 1500
1501 BUG_ON(rq != this_rq()); 1501 if (WARN_ON_ONCE(rq != this_rq()) ||
1502 BUG_ON(p == current); 1502 WARN_ON_ONCE(p == current))
1503 return;
1504
1503 lockdep_assert_held(&rq->lock); 1505 lockdep_assert_held(&rq->lock);
1504 1506
1505 if (!raw_spin_trylock(&p->pi_lock)) { 1507 if (!raw_spin_trylock(&p->pi_lock)) {
@@ -4999,7 +5001,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
4999} 5001}
5000 5002
5001static int min_load_idx = 0; 5003static int min_load_idx = 0;
5002static int max_load_idx = CPU_LOAD_IDX_MAX; 5004static int max_load_idx = CPU_LOAD_IDX_MAX-1;
5003 5005
5004static void 5006static void
5005set_table_entry(struct ctl_table *entry, 5007set_table_entry(struct ctl_table *entry,
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index ed12cbb135f4..e93cca92f38b 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -310,7 +310,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
310 310
311 t = tsk; 311 t = tsk;
312 do { 312 do {
313 task_cputime(tsk, &utime, &stime); 313 task_cputime(t, &utime, &stime);
314 times->utime += utime; 314 times->utime += utime;
315 times->stime += stime; 315 times->stime += stime;
316 times->sum_exec_runtime += task_sched_runtime(t); 316 times->sum_exec_runtime += task_sched_runtime(t);
diff --git a/kernel/sys.c b/kernel/sys.c
index 39c9c4a2949f..0da73cf73e60 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -324,7 +324,6 @@ void kernel_restart_prepare(char *cmd)
324 system_state = SYSTEM_RESTART; 324 system_state = SYSTEM_RESTART;
325 usermodehelper_disable(); 325 usermodehelper_disable();
326 device_shutdown(); 326 device_shutdown();
327 syscore_shutdown();
328} 327}
329 328
330/** 329/**
@@ -370,6 +369,7 @@ void kernel_restart(char *cmd)
370{ 369{
371 kernel_restart_prepare(cmd); 370 kernel_restart_prepare(cmd);
372 disable_nonboot_cpus(); 371 disable_nonboot_cpus();
372 syscore_shutdown();
373 if (!cmd) 373 if (!cmd)
374 printk(KERN_EMERG "Restarting system.\n"); 374 printk(KERN_EMERG "Restarting system.\n");
375 else 375 else
@@ -395,6 +395,7 @@ static void kernel_shutdown_prepare(enum system_states state)
395void kernel_halt(void) 395void kernel_halt(void)
396{ 396{
397 kernel_shutdown_prepare(SYSTEM_HALT); 397 kernel_shutdown_prepare(SYSTEM_HALT);
398 disable_nonboot_cpus();
398 syscore_shutdown(); 399 syscore_shutdown();
399 printk(KERN_EMERG "System halted.\n"); 400 printk(KERN_EMERG "System halted.\n");
400 kmsg_dump(KMSG_DUMP_HALT); 401 kmsg_dump(KMSG_DUMP_HALT);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 2fb8cb88df8d..7f32fe0e52cd 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -67,7 +67,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
67 */ 67 */
68int tick_check_broadcast_device(struct clock_event_device *dev) 68int tick_check_broadcast_device(struct clock_event_device *dev)
69{ 69{
70 if ((tick_broadcast_device.evtdev && 70 if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
71 (tick_broadcast_device.evtdev &&
71 tick_broadcast_device.evtdev->rating >= dev->rating) || 72 tick_broadcast_device.evtdev->rating >= dev->rating) ||
72 (dev->features & CLOCK_EVT_FEAT_C3STOP)) 73 (dev->features & CLOCK_EVT_FEAT_C3STOP))
73 return 0; 74 return 0;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6893d5a2bf08..b3fde6d7b7fc 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -66,7 +66,7 @@
66 66
67static struct ftrace_ops ftrace_list_end __read_mostly = { 67static struct ftrace_ops ftrace_list_end __read_mostly = {
68 .func = ftrace_stub, 68 .func = ftrace_stub,
69 .flags = FTRACE_OPS_FL_RECURSION_SAFE, 69 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
70}; 70};
71 71
72/* ftrace_enabled is a method to turn ftrace on or off */ 72/* ftrace_enabled is a method to turn ftrace on or off */
@@ -694,7 +694,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
694 free_page(tmp); 694 free_page(tmp);
695 } 695 }
696 696
697 free_page((unsigned long)stat->pages);
698 stat->pages = NULL; 697 stat->pages = NULL;
699 stat->start = NULL; 698 stat->start = NULL;
700 699
@@ -1053,6 +1052,19 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1053 1052
1054static struct pid * const ftrace_swapper_pid = &init_struct_pid; 1053static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1055 1054
1055loff_t
1056ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
1057{
1058 loff_t ret;
1059
1060 if (file->f_mode & FMODE_READ)
1061 ret = seq_lseek(file, offset, whence);
1062 else
1063 file->f_pos = ret = 1;
1064
1065 return ret;
1066}
1067
1056#ifdef CONFIG_DYNAMIC_FTRACE 1068#ifdef CONFIG_DYNAMIC_FTRACE
1057 1069
1058#ifndef CONFIG_FTRACE_MCOUNT_RECORD 1070#ifndef CONFIG_FTRACE_MCOUNT_RECORD
@@ -2613,7 +2625,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)
2613 * routine, you can use ftrace_filter_write() for the write 2625 * routine, you can use ftrace_filter_write() for the write
2614 * routine if @flag has FTRACE_ITER_FILTER set, or 2626 * routine if @flag has FTRACE_ITER_FILTER set, or
2615 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. 2627 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2616 * ftrace_regex_lseek() should be used as the lseek routine, and 2628 * ftrace_filter_lseek() should be used as the lseek routine, and
2617 * release must call ftrace_regex_release(). 2629 * release must call ftrace_regex_release().
2618 */ 2630 */
2619int 2631int
@@ -2697,19 +2709,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
2697 inode, file); 2709 inode, file);
2698} 2710}
2699 2711
2700loff_t
2701ftrace_regex_lseek(struct file *file, loff_t offset, int whence)
2702{
2703 loff_t ret;
2704
2705 if (file->f_mode & FMODE_READ)
2706 ret = seq_lseek(file, offset, whence);
2707 else
2708 file->f_pos = ret = 1;
2709
2710 return ret;
2711}
2712
2713static int ftrace_match(char *str, char *regex, int len, int type) 2712static int ftrace_match(char *str, char *regex, int len, int type)
2714{ 2713{
2715 int matched = 0; 2714 int matched = 0;
@@ -3441,14 +3440,14 @@ static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3441 3440
3442static int __init set_ftrace_notrace(char *str) 3441static int __init set_ftrace_notrace(char *str)
3443{ 3442{
3444 strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); 3443 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3445 return 1; 3444 return 1;
3446} 3445}
3447__setup("ftrace_notrace=", set_ftrace_notrace); 3446__setup("ftrace_notrace=", set_ftrace_notrace);
3448 3447
3449static int __init set_ftrace_filter(char *str) 3448static int __init set_ftrace_filter(char *str)
3450{ 3449{
3451 strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); 3450 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3452 return 1; 3451 return 1;
3453} 3452}
3454__setup("ftrace_filter=", set_ftrace_filter); 3453__setup("ftrace_filter=", set_ftrace_filter);
@@ -3571,7 +3570,7 @@ static const struct file_operations ftrace_filter_fops = {
3571 .open = ftrace_filter_open, 3570 .open = ftrace_filter_open,
3572 .read = seq_read, 3571 .read = seq_read,
3573 .write = ftrace_filter_write, 3572 .write = ftrace_filter_write,
3574 .llseek = ftrace_regex_lseek, 3573 .llseek = ftrace_filter_lseek,
3575 .release = ftrace_regex_release, 3574 .release = ftrace_regex_release,
3576}; 3575};
3577 3576
@@ -3579,7 +3578,7 @@ static const struct file_operations ftrace_notrace_fops = {
3579 .open = ftrace_notrace_open, 3578 .open = ftrace_notrace_open,
3580 .read = seq_read, 3579 .read = seq_read,
3581 .write = ftrace_notrace_write, 3580 .write = ftrace_notrace_write,
3582 .llseek = ftrace_regex_lseek, 3581 .llseek = ftrace_filter_lseek,
3583 .release = ftrace_regex_release, 3582 .release = ftrace_regex_release,
3584}; 3583};
3585 3584
@@ -3784,8 +3783,8 @@ static const struct file_operations ftrace_graph_fops = {
3784 .open = ftrace_graph_open, 3783 .open = ftrace_graph_open,
3785 .read = seq_read, 3784 .read = seq_read,
3786 .write = ftrace_graph_write, 3785 .write = ftrace_graph_write,
3786 .llseek = ftrace_filter_lseek,
3787 .release = ftrace_graph_release, 3787 .release = ftrace_graph_release,
3788 .llseek = seq_lseek,
3789}; 3788};
3790#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 3789#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3791 3790
@@ -4131,7 +4130,8 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4131 preempt_disable_notrace(); 4130 preempt_disable_notrace();
4132 trace_recursion_set(TRACE_CONTROL_BIT); 4131 trace_recursion_set(TRACE_CONTROL_BIT);
4133 do_for_each_ftrace_op(op, ftrace_control_list) { 4132 do_for_each_ftrace_op(op, ftrace_control_list) {
4134 if (!ftrace_function_local_disabled(op) && 4133 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4134 !ftrace_function_local_disabled(op) &&
4135 ftrace_ops_test(op, ip)) 4135 ftrace_ops_test(op, ip))
4136 op->func(ip, parent_ip, op, regs); 4136 op->func(ip, parent_ip, op, regs);
4137 } while_for_each_ftrace_op(op); 4137 } while_for_each_ftrace_op(op);
@@ -4439,7 +4439,7 @@ static const struct file_operations ftrace_pid_fops = {
4439 .open = ftrace_pid_open, 4439 .open = ftrace_pid_open,
4440 .write = ftrace_pid_write, 4440 .write = ftrace_pid_write,
4441 .read = seq_read, 4441 .read = seq_read,
4442 .llseek = seq_lseek, 4442 .llseek = ftrace_filter_lseek,
4443 .release = ftrace_pid_release, 4443 .release = ftrace_pid_release,
4444}; 4444};
4445 4445
@@ -4555,12 +4555,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
4555 ftrace_startup_sysctl(); 4555 ftrace_startup_sysctl();
4556 4556
4557 /* we are starting ftrace again */ 4557 /* we are starting ftrace again */
4558 if (ftrace_ops_list != &ftrace_list_end) { 4558 if (ftrace_ops_list != &ftrace_list_end)
4559 if (ftrace_ops_list->next == &ftrace_list_end) 4559 update_ftrace_function();
4560 ftrace_trace_function = ftrace_ops_list->func;
4561 else
4562 ftrace_trace_function = ftrace_ops_list_func;
4563 }
4564 4560
4565 } else { 4561 } else {
4566 /* stopping ftrace calls (just send to ftrace_stub) */ 4562 /* stopping ftrace calls (just send to ftrace_stub) */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4f1dade56981..66338c4f7f4b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -132,7 +132,7 @@ static char *default_bootup_tracer;
132 132
133static int __init set_cmdline_ftrace(char *str) 133static int __init set_cmdline_ftrace(char *str)
134{ 134{
135 strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); 135 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
136 default_bootup_tracer = bootup_tracer_buf; 136 default_bootup_tracer = bootup_tracer_buf;
137 /* We are using ftrace early, expand it */ 137 /* We are using ftrace early, expand it */
138 ring_buffer_expanded = 1; 138 ring_buffer_expanded = 1;
@@ -162,7 +162,7 @@ static char *trace_boot_options __initdata;
162 162
163static int __init set_trace_boot_options(char *str) 163static int __init set_trace_boot_options(char *str)
164{ 164{
165 strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); 165 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
166 trace_boot_options = trace_boot_options_buf; 166 trace_boot_options = trace_boot_options_buf;
167 return 0; 167 return 0;
168} 168}
@@ -744,8 +744,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
744 return; 744 return;
745 745
746 WARN_ON_ONCE(!irqs_disabled()); 746 WARN_ON_ONCE(!irqs_disabled());
747 if (WARN_ON_ONCE(!current_trace->allocated_snapshot)) 747 if (!current_trace->allocated_snapshot) {
748 /* Only the nop tracer should hit this when disabling */
749 WARN_ON_ONCE(current_trace != &nop_trace);
748 return; 750 return;
751 }
749 752
750 arch_spin_lock(&ftrace_max_lock); 753 arch_spin_lock(&ftrace_max_lock);
751 754
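The boot-parameter handlers above move from strncpy() to strlcpy(), and perf_event_mmap_event() in kernel/events/core.c earlier gains an explicit terminator, because strncpy() does not NUL-terminate when the source fills the buffer. A minimal illustration; strlcpy() is a BSD/kernel helper rather than standard C, so the sketch carries its own bounded copy:

#include <stdio.h>
#include <string.h>

/* simplified bounded copy in the spirit of strlcpy(): always terminates */
static size_t copy_terminated(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = len < size - 1 ? len : size - 1;

                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;                     /* length the caller tried to copy */
}

int main(void)
{
        char a[8], b[8];

        strncpy(a, "function_graph", sizeof(a));    /* fills a[] with no '\0' */
        copy_terminated(b, "function_graph", sizeof(b));

        printf("b = \"%s\"\n", b);      /* safe: "functio" */
        /* printing a with %s here would read past the buffer */
        return 0;
}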
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 42ca822fc701..83a8b5b7bd35 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -322,7 +322,7 @@ static const struct file_operations stack_trace_filter_fops = {
322 .open = stack_trace_filter_open, 322 .open = stack_trace_filter_open,
323 .read = seq_read, 323 .read = seq_read,
324 .write = ftrace_filter_write, 324 .write = ftrace_filter_write,
325 .llseek = ftrace_regex_lseek, 325 .llseek = ftrace_filter_lseek,
326 .release = ftrace_regex_release, 326 .release = ftrace_regex_release,
327}; 327};
328 328
diff --git a/kernel/user.c b/kernel/user.c
index e81978e8c03b..8e635a18ab52 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -51,6 +51,8 @@ struct user_namespace init_user_ns = {
51 .owner = GLOBAL_ROOT_UID, 51 .owner = GLOBAL_ROOT_UID,
52 .group = GLOBAL_ROOT_GID, 52 .group = GLOBAL_ROOT_GID,
53 .proc_inum = PROC_USER_INIT_INO, 53 .proc_inum = PROC_USER_INIT_INO,
54 .may_mount_sysfs = true,
55 .may_mount_proc = true,
54}; 56};
55EXPORT_SYMBOL_GPL(init_user_ns); 57EXPORT_SYMBOL_GPL(init_user_ns);
56 58
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index b14f4d342043..a54f26f82eb2 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -61,6 +61,15 @@ int create_user_ns(struct cred *new)
61 kgid_t group = new->egid; 61 kgid_t group = new->egid;
62 int ret; 62 int ret;
63 63
64 /*
65 * Verify that we can not violate the policy of which files
66 * may be accessed that is specified by the root directory,
67 * by verifying that the root directory is at the root of the
68 * mount namespace which allows all files to be accessed.
69 */
70 if (current_chrooted())
71 return -EPERM;
72
64 /* The creator needs a mapping in the parent user namespace 73 /* The creator needs a mapping in the parent user namespace
65 * or else we won't be able to reasonably tell userspace who 74 * or else we won't be able to reasonably tell userspace who
66 * created a user_namespace. 75 * created a user_namespace.
@@ -87,6 +96,8 @@ int create_user_ns(struct cred *new)
87 96
88 set_cred_user_ns(new, ns); 97 set_cred_user_ns(new, ns);
89 98
99 update_mnt_policy(ns);
100
90 return 0; 101 return 0;
91} 102}
92 103
diff --git a/lib/kobject.c b/lib/kobject.c
index e07ee1fcd6f1..a65486613d79 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobject *kobj)
529 return kobj; 529 return kobj;
530} 530}
531 531
532static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
533{
534 if (!kref_get_unless_zero(&kobj->kref))
535 kobj = NULL;
536 return kobj;
537}
538
532/* 539/*
533 * kobject_cleanup - free kobject resources. 540 * kobject_cleanup - free kobject resources.
534 * @kobj: object to cleanup 541 * @kobj: object to cleanup
@@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
751 758
752 list_for_each_entry(k, &kset->list, entry) { 759 list_for_each_entry(k, &kset->list, entry) {
753 if (kobject_name(k) && !strcmp(kobject_name(k), name)) { 760 if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
754 ret = kobject_get(k); 761 ret = kobject_get_unless_zero(k);
755 break; 762 break;
756 } 763 }
757 } 764 }
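kset_find_obj() now takes its reference via kref_get_unless_zero(), so a kobject whose count has already dropped to zero cannot be resurrected from the kset list while it waits to be released. The underlying pattern is a compare-and-swap loop that only increments a non-zero count; a standalone sketch with a plain atomic integer rather than the kref API:

#include <stdbool.h>
#include <stdio.h>

/* take a reference only if the object is still live (count > 0) */
static bool get_unless_zero(int *refcount)
{
        int old = __atomic_load_n(refcount, __ATOMIC_RELAXED);

        while (old != 0) {
                if (__atomic_compare_exchange_n(refcount, &old, old + 1,
                                                false, __ATOMIC_ACQUIRE,
                                                __ATOMIC_RELAXED))
                        return true;
                /* old was refreshed by the failed CAS; retry */
        }
        return false;                   /* already dying: do not revive it */
}

int main(void)
{
        int live = 1, dying = 0;

        printf("live:  %d\n", get_unless_zero(&live));    /* 1 */
        printf("dying: %d\n", get_unless_zero(&dying));   /* 0 */
        return 0;
}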
diff --git a/mm/fremap.c b/mm/fremap.c
index 4723ac8d2fc2..87da3590c61e 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -204,10 +204,8 @@ get_write_lock:
204 unsigned long addr; 204 unsigned long addr;
205 struct file *file = get_file(vma->vm_file); 205 struct file *file = get_file(vma->vm_file);
206 206
207 vm_flags = vma->vm_flags; 207 addr = mmap_region(file, start, size,
208 if (!(flags & MAP_NONBLOCK)) 208 vma->vm_flags, pgoff);
209 vm_flags |= VM_POPULATE;
210 addr = mmap_region(file, start, size, vm_flags, pgoff);
211 fput(file); 209 fput(file);
212 if (IS_ERR_VALUE(addr)) { 210 if (IS_ERR_VALUE(addr)) {
213 err = addr; 211 err = addr;
@@ -226,12 +224,6 @@ get_write_lock:
226 mutex_unlock(&mapping->i_mmap_mutex); 224 mutex_unlock(&mapping->i_mmap_mutex);
227 } 225 }
228 226
229 if (!(flags & MAP_NONBLOCK) && !(vma->vm_flags & VM_POPULATE)) {
230 if (!has_write_lock)
231 goto get_write_lock;
232 vma->vm_flags |= VM_POPULATE;
233 }
234
235 if (vma->vm_flags & VM_LOCKED) { 227 if (vma->vm_flags & VM_LOCKED) {
236 /* 228 /*
237 * drop PG_Mlocked flag for over-mapped range 229 * drop PG_Mlocked flag for over-mapped range
diff --git a/mm/memory.c b/mm/memory.c
index 494526ae024a..13cbc420fead 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -216,6 +216,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
216 tlb->mm = mm; 216 tlb->mm = mm;
217 217
218 tlb->fullmm = fullmm; 218 tlb->fullmm = fullmm;
219 tlb->need_flush_all = 0;
219 tlb->start = -1UL; 220 tlb->start = -1UL;
220 tlb->end = 0; 221 tlb->end = 0;
221 tlb->need_flush = 0; 222 tlb->need_flush = 0;
diff --git a/mm/mlock.c b/mm/mlock.c
index 1c5e33fce639..79b7cf7d1bca 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -358,7 +358,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
358 358
359 newflags = vma->vm_flags & ~VM_LOCKED; 359 newflags = vma->vm_flags & ~VM_LOCKED;
360 if (on) 360 if (on)
361 newflags |= VM_LOCKED | VM_POPULATE; 361 newflags |= VM_LOCKED;
362 362
363 tmp = vma->vm_end; 363 tmp = vma->vm_end;
364 if (tmp > end) 364 if (tmp > end)
@@ -418,8 +418,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
418 * range with the first VMA. Also, skip undesirable VMA types. 418 * range with the first VMA. Also, skip undesirable VMA types.
419 */ 419 */
420 nend = min(end, vma->vm_end); 420 nend = min(end, vma->vm_end);
421 if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) != 421 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
422 VM_POPULATE)
423 continue; 422 continue;
424 if (nstart < vma->vm_start) 423 if (nstart < vma->vm_start)
425 nstart = vma->vm_start; 424 nstart = vma->vm_start;
@@ -492,9 +491,9 @@ static int do_mlockall(int flags)
492 struct vm_area_struct * vma, * prev = NULL; 491 struct vm_area_struct * vma, * prev = NULL;
493 492
494 if (flags & MCL_FUTURE) 493 if (flags & MCL_FUTURE)
495 current->mm->def_flags |= VM_LOCKED | VM_POPULATE; 494 current->mm->def_flags |= VM_LOCKED;
496 else 495 else
497 current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE); 496 current->mm->def_flags &= ~VM_LOCKED;
498 if (flags == MCL_FUTURE) 497 if (flags == MCL_FUTURE)
499 goto out; 498 goto out;
500 499
@@ -503,7 +502,7 @@ static int do_mlockall(int flags)
503 502
504 newflags = vma->vm_flags & ~VM_LOCKED; 503 newflags = vma->vm_flags & ~VM_LOCKED;
505 if (flags & MCL_CURRENT) 504 if (flags & MCL_CURRENT)
506 newflags |= VM_LOCKED | VM_POPULATE; 505 newflags |= VM_LOCKED;
507 506
508 /* Ignore errors */ 507 /* Ignore errors */
509 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); 508 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
diff --git a/mm/mmap.c b/mm/mmap.c
index 2664a47cec93..0db0de1c2fbe 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1306,7 +1306,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1306 } 1306 }
1307 1307
1308 addr = mmap_region(file, addr, len, vm_flags, pgoff); 1308 addr = mmap_region(file, addr, len, vm_flags, pgoff);
1309 if (!IS_ERR_VALUE(addr) && (vm_flags & VM_POPULATE)) 1309 if (!IS_ERR_VALUE(addr) &&
1310 ((vm_flags & VM_LOCKED) ||
1311 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
1310 *populate = len; 1312 *populate = len;
1311 return addr; 1313 return addr;
1312} 1314}
@@ -1938,7 +1940,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1938 1940
1939 /* Check the cache first. */ 1941 /* Check the cache first. */
1940 /* (Cache hit rate is typically around 35%.) */ 1942 /* (Cache hit rate is typically around 35%.) */
1941 vma = mm->mmap_cache; 1943 vma = ACCESS_ONCE(mm->mmap_cache);
1942 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { 1944 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
1943 struct rb_node *rb_node; 1945 struct rb_node *rb_node;
1944 1946
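
The do_mmap_pgoff() hunk restores the pre-VM_POPULATE rule for deciding when to prefault a new mapping: populate it when it is locked, or when the caller passed MAP_POPULATE without MAP_NONBLOCK. Seen from userspace that is simply the MAP_POPULATE flag; a minimal, hedged sketch (the temporary file path is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        size_t len = 16 * (size_t)pagesz;

        int fd = open("/tmp/populate-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
        if (fd < 0 || ftruncate(fd, len) != 0) {
                perror("setup");
                return 1;
        }

        /* MAP_POPULATE asks the kernel to fault the whole range in up front. */
        void *p = mmap(NULL, len, PROT_READ, MAP_SHARED | MAP_POPULATE, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        size_t pages = len / (size_t)pagesz;    /* == 16 */
        unsigned char vec[16];
        if (mincore(p, len, vec) != 0) {
                perror("mincore");
                return 1;
        }

        size_t resident = 0;
        for (size_t i = 0; i < pages; i++)
                resident += vec[i] & 1;
        printf("%zu of %zu pages resident immediately after mmap()\n",
               resident, pages);

        munmap(p, len);
        close(fd);
        unlink("/tmp/populate-demo");
        return 0;
}

mincore() reports per-page residency, so a populated mapping should show every page resident before it is ever touched.
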
diff --git a/mm/nommu.c b/mm/nommu.c
index e19328087534..2f3ea749c318 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -821,7 +821,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
821 struct vm_area_struct *vma; 821 struct vm_area_struct *vma;
822 822
823 /* check the cache first */ 823 /* check the cache first */
824 vma = mm->mmap_cache; 824 vma = ACCESS_ONCE(mm->mmap_cache);
825 if (vma && vma->vm_start <= addr && vma->vm_end > addr) 825 if (vma && vma->vm_start <= addr && vma->vm_end > addr)
826 return vma; 826 return vma;
827 827
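
Both find_vma() implementations now read mm->mmap_cache through ACCESS_ONCE(), which in kernels of this era is essentially a volatile cast that forces exactly one load and keeps the compiler from caching or re-reading the value. A hedged userspace sketch of the same idiom (GCC/Clang __typeof__ assumed; a strictly portable program would use C11 atomics or volatile sig_atomic_t instead):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static int stop;                        /* written from the signal handler */

static void on_sigint(int sig)
{
        (void)sig;
        stop = 1;
}

int main(void)
{
        signal(SIGINT, on_sigint);
        puts("polling; press Ctrl-C to stop");

        /* Without the volatile access the compiler may hoist the load of
         * 'stop' out of the loop and spin forever at -O2. */
        while (!ACCESS_ONCE(stop))
                usleep(100 * 1000);

        puts("seen");
        return 0;
}
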
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a18714469bf7..85addcd9372b 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -86,13 +86,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
86 86
87 grp = &vlan_info->grp; 87 grp = &vlan_info->grp;
88 88
89 /* Take it out of our own structures, but be sure to interlock with
90 * HW accelerating devices or SW vlan input packet processing if
91 * VLAN is not 0 (leave it there for 802.1p).
92 */
93 if (vlan_id)
94 vlan_vid_del(real_dev, vlan_id);
95
96 grp->nr_vlan_devs--; 89 grp->nr_vlan_devs--;
97 90
98 if (vlan->flags & VLAN_FLAG_MVRP) 91 if (vlan->flags & VLAN_FLAG_MVRP)
@@ -114,6 +107,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
114 vlan_gvrp_uninit_applicant(real_dev); 107 vlan_gvrp_uninit_applicant(real_dev);
115 } 108 }
116 109
110 /* Take it out of our own structures, but be sure to interlock with
111 * HW accelerating devices or SW vlan input packet processing if
112 * VLAN is not 0 (leave it there for 802.1p).
113 */
114 if (vlan_id)
115 vlan_vid_del(real_dev, vlan_id);
116
117 /* Get rid of the vlan's reference to real_dev */ 117 /* Get rid of the vlan's reference to real_dev */
118 dev_put(real_dev); 118 dev_put(real_dev);
119} 119}
diff --git a/net/atm/common.c b/net/atm/common.c
index 7b491006eaf4..737bef59ce89 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -531,6 +531,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
531 struct sk_buff *skb; 531 struct sk_buff *skb;
532 int copied, error = -EINVAL; 532 int copied, error = -EINVAL;
533 533
534 msg->msg_namelen = 0;
535
534 if (sock->state != SS_CONNECTED) 536 if (sock->state != SS_CONNECTED)
535 return -ENOTCONN; 537 return -ENOTCONN;
536 538
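
vcc_recvmsg() now zeroes msg->msg_namelen before any early return, and the same one-liner recurs in the ax25, Bluetooth, CAIF, IrDA and IUCV hunks below. The address length is an in/out value: whatever the protocol handler leaves in msg_namelen is copied back to userspace as recvfrom()'s address length, so an uninitialised field can expose stack contents. A hedged userspace sketch of that in/out behaviour over a loopback UDP socket (the port number is illustrative):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int rx = socket(AF_INET, SOCK_DGRAM, 0);
        int tx = socket(AF_INET, SOCK_DGRAM, 0);
        if (rx < 0 || tx < 0) {
                perror("socket");
                return 1;
        }

        struct sockaddr_in addr = { .sin_family = AF_INET,
                                    .sin_port = htons(40444),
                                    .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        if (bind(rx, (struct sockaddr *)&addr, sizeof(addr)) != 0) {
                perror("bind");
                return 1;
        }
        sendto(tx, "ping", 4, 0, (struct sockaddr *)&addr, sizeof(addr));

        struct sockaddr_storage src;
        socklen_t srclen = sizeof(src); /* in: buffer size, out: bytes the kernel filled */
        char buf[16];

        ssize_t n = recvfrom(rx, buf, sizeof(buf), 0,
                             (struct sockaddr *)&src, &srclen);
        printf("got %zd bytes, kernel reported address length %u\n",
               n, (unsigned)srclen);

        close(rx);
        close(tx);
        return 0;
}
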
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 7b11f8bc5071..e277e38f736b 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1642,6 +1642,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
1642 ax25_address src; 1642 ax25_address src;
1643 const unsigned char *mac = skb_mac_header(skb); 1643 const unsigned char *mac = skb_mac_header(skb);
1644 1644
1645 memset(sax, 0, sizeof(struct full_sockaddr_ax25));
1645 ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, 1646 ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
1646 &digi, NULL, NULL); 1647 &digi, NULL, NULL);
1647 sax->sax25_family = AF_AX25; 1648 sax->sax25_family = AF_AX25;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index d3ee69b35a78..0d1b08cc76e1 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -230,6 +230,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
230 if (flags & (MSG_OOB)) 230 if (flags & (MSG_OOB))
231 return -EOPNOTSUPP; 231 return -EOPNOTSUPP;
232 232
233 msg->msg_namelen = 0;
234
233 skb = skb_recv_datagram(sk, flags, noblock, &err); 235 skb = skb_recv_datagram(sk, flags, noblock, &err);
234 if (!skb) { 236 if (!skb) {
235 if (sk->sk_shutdown & RCV_SHUTDOWN) 237 if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -237,8 +239,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
237 return err; 239 return err;
238 } 240 }
239 241
240 msg->msg_namelen = 0;
241
242 copied = skb->len; 242 copied = skb->len;
243 if (len < copied) { 243 if (len < copied) {
244 msg->msg_flags |= MSG_TRUNC; 244 msg->msg_flags |= MSG_TRUNC;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index c23bae86263b..7c9224bcce17 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -608,6 +608,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
608 608
609 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { 609 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
610 rfcomm_dlc_accept(d); 610 rfcomm_dlc_accept(d);
611 msg->msg_namelen = 0;
611 return 0; 612 return 0;
612 } 613 }
613 614
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 79d87d8d4f51..fb6192c9812e 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -359,6 +359,7 @@ static void __sco_sock_close(struct sock *sk)
359 sco_chan_del(sk, ECONNRESET); 359 sco_chan_del(sk, ECONNRESET);
360 break; 360 break;
361 361
362 case BT_CONNECT2:
362 case BT_CONNECT: 363 case BT_CONNECT:
363 case BT_DISCONN: 364 case BT_DISCONN:
364 sco_chan_del(sk, ECONNRESET); 365 sco_chan_del(sk, ECONNRESET);
@@ -664,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
664 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { 665 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
665 hci_conn_accept(pi->conn->hcon, 0); 666 hci_conn_accept(pi->conn->hcon, 0);
666 sk->sk_state = BT_CONFIG; 667 sk->sk_state = BT_CONFIG;
668 msg->msg_namelen = 0;
667 669
668 release_sock(sk); 670 release_sock(sk);
669 return 0; 671 return 0;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index b0812c91c0f0..bab338e6270d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -423,7 +423,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
423 return 0; 423 return 0;
424 br_warn(br, "adding interface %s with same address " 424 br_warn(br, "adding interface %s with same address "
425 "as a received packet\n", 425 "as a received packet\n",
426 source->dev->name); 426 source ? source->dev->name : br->dev->name);
427 fdb_delete(br, fdb); 427 fdb_delete(br, fdb);
428 } 428 }
429 429
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 095259f83902..ff2ff3ce6965 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
286 if (m->msg_flags&MSG_OOB) 286 if (m->msg_flags&MSG_OOB)
287 goto read_error; 287 goto read_error;
288 288
289 m->msg_namelen = 0;
290
289 skb = skb_recv_datagram(sk, flags, 0 , &ret); 291 skb = skb_recv_datagram(sk, flags, 0 , &ret);
290 if (!skb) 292 if (!skb)
291 goto read_error; 293 goto read_error;
diff --git a/net/can/gw.c b/net/can/gw.c
index 2d117dc5ebea..117814a7e73c 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -466,7 +466,7 @@ static int cgw_notifier(struct notifier_block *nb,
466 if (gwj->src.dev == dev || gwj->dst.dev == dev) { 466 if (gwj->src.dev == dev || gwj->dst.dev == dev) {
467 hlist_del(&gwj->list); 467 hlist_del(&gwj->list);
468 cgw_unregister_filter(gwj); 468 cgw_unregister_filter(gwj);
469 kfree(gwj); 469 kmem_cache_free(cgw_cache, gwj);
470 } 470 }
471 } 471 }
472 } 472 }
@@ -864,7 +864,7 @@ static void cgw_remove_all_jobs(void)
864 hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { 864 hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
865 hlist_del(&gwj->list); 865 hlist_del(&gwj->list);
866 cgw_unregister_filter(gwj); 866 cgw_unregister_filter(gwj);
867 kfree(gwj); 867 kmem_cache_free(cgw_cache, gwj);
868 } 868 }
869} 869}
870 870
@@ -920,7 +920,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
920 920
921 hlist_del(&gwj->list); 921 hlist_del(&gwj->list);
922 cgw_unregister_filter(gwj); 922 cgw_unregister_filter(gwj);
923 kfree(gwj); 923 kmem_cache_free(cgw_cache, gwj);
924 err = 0; 924 err = 0;
925 break; 925 break;
926 } 926 }
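
The can/gw.c hunks free cgw_job objects back to the cgw_cache slab with kmem_cache_free() instead of kfree(); an object must be returned to the allocator it came from. A hedged userspace analogue of that discipline with a trivial free-list pool (all names invented for illustration):

#include <assert.h>
#include <stdio.h>

struct job {
        int id;
        struct job *next;       /* free-list linkage while the object is idle */
};

static struct job slab[8];
static struct job *free_list;

static void pool_init(void)
{
        for (int i = 0; i < 8; i++) {
                slab[i].next = free_list;
                free_list = &slab[i];
        }
}

static struct job *pool_alloc(void)
{
        struct job *j = free_list;
        if (j)
                free_list = j->next;
        return j;
}

static void pool_free(struct job *j)
{
        /* NOT free(j): the object belongs to the pool, just as a cgw_job
         * belongs to cgw_cache in the hunks above. */
        j->next = free_list;
        free_list = j;
}

int main(void)
{
        pool_init();
        struct job *j = pool_alloc();
        assert(j != NULL);
        j->id = 42;
        printf("allocated job %d from the pool\n", j->id);
        pool_free(j);
        return 0;
}
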
diff --git a/net/core/dev.c b/net/core/dev.c
index d540ced1f6c6..e7d68ed8aafe 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1545,7 +1545,6 @@ void net_enable_timestamp(void)
1545 return; 1545 return;
1546 } 1546 }
1547#endif 1547#endif
1548 WARN_ON(in_interrupt());
1549 static_key_slow_inc(&netstamp_needed); 1548 static_key_slow_inc(&netstamp_needed);
1550} 1549}
1551EXPORT_SYMBOL(net_enable_timestamp); 1550EXPORT_SYMBOL(net_enable_timestamp);
@@ -1625,7 +1624,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1625 } 1624 }
1626 1625
1627 skb_orphan(skb); 1626 skb_orphan(skb);
1628 nf_reset(skb);
1629 1627
1630 if (unlikely(!is_skb_forwardable(dev, skb))) { 1628 if (unlikely(!is_skb_forwardable(dev, skb))) {
1631 atomic_long_inc(&dev->rx_dropped); 1629 atomic_long_inc(&dev->rx_dropped);
@@ -1641,6 +1639,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1641 skb->mark = 0; 1639 skb->mark = 0;
1642 secpath_reset(skb); 1640 secpath_reset(skb);
1643 nf_reset(skb); 1641 nf_reset(skb);
1642 nf_reset_trace(skb);
1644 return netif_rx(skb); 1643 return netif_rx(skb);
1645} 1644}
1646EXPORT_SYMBOL_GPL(dev_forward_skb); 1645EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -3315,6 +3314,7 @@ int netdev_rx_handler_register(struct net_device *dev,
3315 if (dev->rx_handler) 3314 if (dev->rx_handler)
3316 return -EBUSY; 3315 return -EBUSY;
3317 3316
3317 /* Note: rx_handler_data must be set before rx_handler */
3318 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 3318 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3319 rcu_assign_pointer(dev->rx_handler, rx_handler); 3319 rcu_assign_pointer(dev->rx_handler, rx_handler);
3320 3320
@@ -3335,6 +3335,11 @@ void netdev_rx_handler_unregister(struct net_device *dev)
3335 3335
3336 ASSERT_RTNL(); 3336 ASSERT_RTNL();
3337 RCU_INIT_POINTER(dev->rx_handler, NULL); 3337 RCU_INIT_POINTER(dev->rx_handler, NULL);
3338 /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
3339 * section has a guarantee to see a non NULL rx_handler_data
3340 * as well.
3341 */
3342 synchronize_net();
3338 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 3343 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3339} 3344}
3340EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 3345EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
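
The new comments in netdev_rx_handler_register()/_unregister() spell out an ordering contract: publish rx_handler_data before rx_handler, and on teardown clear the handler, wait out readers, then clear the data. A hedged sketch of the publish half using C11 release/acquire atomics (an analogy only, not the kernel's RCU machinery, and the synchronize_net() grace period has no direct equivalent here; build with -pthread):

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct handler_data { int magic; };

static struct handler_data the_data = { .magic = 0xbeef };
static _Atomic(struct handler_data *) rx_handler_data;
static _Atomic(int (*)(struct handler_data *)) rx_handler;

static int handle(struct handler_data *d) { return d->magic; }

static void *reader(void *arg)
{
        (void)arg;
        for (;;) {
                int (*h)(struct handler_data *) =
                        atomic_load_explicit(&rx_handler, memory_order_acquire);
                if (h) {
                        /* Seeing the handler guarantees we also see its data. */
                        struct handler_data *d =
                                atomic_load_explicit(&rx_handler_data,
                                                     memory_order_relaxed);
                        assert(d != NULL);
                        printf("handler returned 0x%x\n", (unsigned)h(d));
                        return NULL;
                }
        }
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, NULL, reader, NULL);

        /* Register: data first, then the handler that makes it reachable. */
        atomic_store_explicit(&rx_handler_data, &the_data, memory_order_relaxed);
        atomic_store_explicit(&rx_handler, handle, memory_order_release);

        pthread_join(t, NULL);
        return 0;
}

Because the reader's acquire load of the handler pairs with the writer's release store, seeing a non-NULL handler guarantees the data store is visible too, which is the invariant the unregister path preserves in reverse.
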
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index bd2eb9d3e369..abdc9e6ef33e 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -37,7 +37,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
37 ha->type = addr_type; 37 ha->type = addr_type;
38 ha->refcount = 1; 38 ha->refcount = 1;
39 ha->global_use = global; 39 ha->global_use = global;
40 ha->synced = false; 40 ha->synced = 0;
41 list_add_tail_rcu(&ha->list, &list->list); 41 list_add_tail_rcu(&ha->list, &list->list);
42 list->count++; 42 list->count++;
43 43
@@ -165,7 +165,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
165 addr_len, ha->type); 165 addr_len, ha->type);
166 if (err) 166 if (err)
167 break; 167 break;
168 ha->synced = true; 168 ha->synced++;
169 ha->refcount++; 169 ha->refcount++;
170 } else if (ha->refcount == 1) { 170 } else if (ha->refcount == 1) {
171 __hw_addr_del(to_list, ha->addr, addr_len, ha->type); 171 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
@@ -186,7 +186,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
186 if (ha->synced) { 186 if (ha->synced) {
187 __hw_addr_del(to_list, ha->addr, 187 __hw_addr_del(to_list, ha->addr,
188 addr_len, ha->type); 188 addr_len, ha->type);
189 ha->synced = false; 189 ha->synced--;
190 __hw_addr_del(from_list, ha->addr, 190 __hw_addr_del(from_list, ha->addr,
191 addr_len, ha->type); 191 addr_len, ha->type);
192 } 192 }
diff --git a/net/core/flow.c b/net/core/flow.c
index c56ea6f7f6c7..2bfd081c59f7 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -328,7 +328,7 @@ static void flow_cache_flush_per_cpu(void *data)
328 struct flow_flush_info *info = data; 328 struct flow_flush_info *info = data;
329 struct tasklet_struct *tasklet; 329 struct tasklet_struct *tasklet;
330 330
331 tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet); 331 tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
332 tasklet->data = (unsigned long)info; 332 tasklet->data = (unsigned long)info;
333 tasklet_schedule(tasklet); 333 tasklet_schedule(tasklet);
334} 334}
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 9d4c7201400d..e187bf06d673 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -140,6 +140,8 @@ ipv6:
140 flow->ports = *ports; 140 flow->ports = *ports;
141 } 141 }
142 142
143 flow->thoff = (u16) nhoff;
144
143 return true; 145 return true;
144} 146}
145EXPORT_SYMBOL(skb_flow_dissect); 147EXPORT_SYMBOL(skb_flow_dissect);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5fb8d7e47294..23854b51a259 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -496,8 +496,10 @@ static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
496 } 496 }
497 if (ops->fill_info) { 497 if (ops->fill_info) {
498 data = nla_nest_start(skb, IFLA_INFO_DATA); 498 data = nla_nest_start(skb, IFLA_INFO_DATA);
499 if (data == NULL) 499 if (data == NULL) {
500 err = -EMSGSIZE;
500 goto err_cancel_link; 501 goto err_cancel_link;
502 }
501 err = ops->fill_info(skb, dev); 503 err = ops->fill_info(skb, dev);
502 if (err < 0) 504 if (err < 0)
503 goto err_cancel_data; 505 goto err_cancel_data;
@@ -1070,7 +1072,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1070 rcu_read_lock(); 1072 rcu_read_lock();
1071 cb->seq = net->dev_base_seq; 1073 cb->seq = net->dev_base_seq;
1072 1074
1073 if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, 1075 if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
1074 ifla_policy) >= 0) { 1076 ifla_policy) >= 0) {
1075 1077
1076 if (tb[IFLA_EXT_MASK]) 1078 if (tb[IFLA_EXT_MASK])
@@ -1920,7 +1922,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
1920 u32 ext_filter_mask = 0; 1922 u32 ext_filter_mask = 0;
1921 u16 min_ifinfo_dump_size = 0; 1923 u16 min_ifinfo_dump_size = 0;
1922 1924
1923 if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, 1925 if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
1924 ifla_policy) >= 0) { 1926 ifla_policy) >= 0) {
1925 if (tb[IFLA_EXT_MASK]) 1927 if (tb[IFLA_EXT_MASK])
1926 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 1928 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
diff --git a/net/core/scm.c b/net/core/scm.c
index 905dcc6ad1e3..2dc6cdaaae8a 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -24,6 +24,7 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#include <linux/security.h> 26#include <linux/security.h>
27#include <linux/pid_namespace.h>
27#include <linux/pid.h> 28#include <linux/pid.h>
28#include <linux/nsproxy.h> 29#include <linux/nsproxy.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
@@ -52,7 +53,8 @@ static __inline__ int scm_check_creds(struct ucred *creds)
52 if (!uid_valid(uid) || !gid_valid(gid)) 53 if (!uid_valid(uid) || !gid_valid(gid))
53 return -EINVAL; 54 return -EINVAL;
54 55
55 if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) && 56 if ((creds->pid == task_tgid_vnr(current) ||
57 ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
56 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || 58 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
57 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && 59 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
58 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || 60 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 68f6a94f7661..c929d9c1c4b6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1333,8 +1333,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1333 iph->frag_off |= htons(IP_MF); 1333 iph->frag_off |= htons(IP_MF);
1334 offset += (skb->len - skb->mac_len - iph->ihl * 4); 1334 offset += (skb->len - skb->mac_len - iph->ihl * 4);
1335 } else { 1335 } else {
1336 if (!(iph->frag_off & htons(IP_DF))) 1336 iph->id = htons(id++);
1337 iph->id = htons(id++);
1338 } 1337 }
1339 iph->tot_len = htons(skb->len - skb->mac_len); 1338 iph->tot_len = htons(skb->len - skb->mac_len);
1340 iph->check = 0; 1339 iph->check = 0;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index f678507bc829..c6287cd978c2 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -587,13 +587,16 @@ static void check_lifetime(struct work_struct *work)
587{ 587{
588 unsigned long now, next, next_sec, next_sched; 588 unsigned long now, next, next_sec, next_sched;
589 struct in_ifaddr *ifa; 589 struct in_ifaddr *ifa;
590 struct hlist_node *n;
590 int i; 591 int i;
591 592
592 now = jiffies; 593 now = jiffies;
593 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); 594 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
594 595
595 rcu_read_lock();
596 for (i = 0; i < IN4_ADDR_HSIZE; i++) { 596 for (i = 0; i < IN4_ADDR_HSIZE; i++) {
597 bool change_needed = false;
598
599 rcu_read_lock();
597 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) { 600 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
598 unsigned long age; 601 unsigned long age;
599 602
@@ -606,16 +609,7 @@ static void check_lifetime(struct work_struct *work)
606 609
607 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && 610 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
608 age >= ifa->ifa_valid_lft) { 611 age >= ifa->ifa_valid_lft) {
609 struct in_ifaddr **ifap ; 612 change_needed = true;
610
611 rtnl_lock();
612 for (ifap = &ifa->ifa_dev->ifa_list;
613 *ifap != NULL; ifap = &ifa->ifa_next) {
614 if (*ifap == ifa)
615 inet_del_ifa(ifa->ifa_dev,
616 ifap, 1);
617 }
618 rtnl_unlock();
619 } else if (ifa->ifa_preferred_lft == 613 } else if (ifa->ifa_preferred_lft ==
620 INFINITY_LIFE_TIME) { 614 INFINITY_LIFE_TIME) {
621 continue; 615 continue;
@@ -625,10 +619,8 @@ static void check_lifetime(struct work_struct *work)
625 next = ifa->ifa_tstamp + 619 next = ifa->ifa_tstamp +
626 ifa->ifa_valid_lft * HZ; 620 ifa->ifa_valid_lft * HZ;
627 621
628 if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) { 622 if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
629 ifa->ifa_flags |= IFA_F_DEPRECATED; 623 change_needed = true;
630 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
631 }
632 } else if (time_before(ifa->ifa_tstamp + 624 } else if (time_before(ifa->ifa_tstamp +
633 ifa->ifa_preferred_lft * HZ, 625 ifa->ifa_preferred_lft * HZ,
634 next)) { 626 next)) {
@@ -636,8 +628,42 @@ static void check_lifetime(struct work_struct *work)
636 ifa->ifa_preferred_lft * HZ; 628 ifa->ifa_preferred_lft * HZ;
637 } 629 }
638 } 630 }
631 rcu_read_unlock();
632 if (!change_needed)
633 continue;
634 rtnl_lock();
635 hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
636 unsigned long age;
637
638 if (ifa->ifa_flags & IFA_F_PERMANENT)
639 continue;
640
641 /* We try to batch several events at once. */
642 age = (now - ifa->ifa_tstamp +
643 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
644
645 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
646 age >= ifa->ifa_valid_lft) {
647 struct in_ifaddr **ifap;
648
649 for (ifap = &ifa->ifa_dev->ifa_list;
650 *ifap != NULL; ifap = &(*ifap)->ifa_next) {
651 if (*ifap == ifa) {
652 inet_del_ifa(ifa->ifa_dev,
653 ifap, 1);
654 break;
655 }
656 }
657 } else if (ifa->ifa_preferred_lft !=
658 INFINITY_LIFE_TIME &&
659 age >= ifa->ifa_preferred_lft &&
660 !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
661 ifa->ifa_flags |= IFA_F_DEPRECATED;
662 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
663 }
664 }
665 rtnl_unlock();
639 } 666 }
640 rcu_read_unlock();
641 667
642 next_sec = round_jiffies_up(next); 668 next_sec = round_jiffies_up(next);
643 next_sched = next; 669 next_sched = next;
@@ -802,8 +828,12 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
802 if (nlh->nlmsg_flags & NLM_F_EXCL || 828 if (nlh->nlmsg_flags & NLM_F_EXCL ||
803 !(nlh->nlmsg_flags & NLM_F_REPLACE)) 829 !(nlh->nlmsg_flags & NLM_F_REPLACE))
804 return -EEXIST; 830 return -EEXIST;
805 831 ifa = ifa_existing;
806 set_ifa_lifetime(ifa_existing, valid_lft, prefered_lft); 832 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
833 cancel_delayed_work(&check_lifetime_work);
834 schedule_delayed_work(&check_lifetime_work, 0);
835 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
836 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
807 } 837 }
808 return 0; 838 return 0;
809} 839}
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 98cbc6877019..bf6c5cf31aed 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1522,7 +1522,8 @@ static int __init ip_auto_config(void)
1522 } 1522 }
1523 for (i++; i < CONF_NAMESERVERS_MAX; i++) 1523 for (i++; i < CONF_NAMESERVERS_MAX; i++)
1524 if (ic_nameservers[i] != NONE) 1524 if (ic_nameservers[i] != NONE)
1525 pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]); 1525 pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]);
1526 pr_cont("\n");
1526#endif /* !SILENT */ 1527#endif /* !SILENT */
1527 1528
1528 return 0; 1529 return 0;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index ce2d43e1f09f..0d755c50994b 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -36,19 +36,6 @@ config NF_CONNTRACK_PROC_COMPAT
36 36
37 If unsure, say Y. 37 If unsure, say Y.
38 38
39config IP_NF_QUEUE
40 tristate "IP Userspace queueing via NETLINK (OBSOLETE)"
41 depends on NETFILTER_ADVANCED
42 help
43 Netfilter has the ability to queue packets to user space: the
44 netlink device can be used to access them using this driver.
45
46 This option enables the old IPv4-only "ip_queue" implementation
47 which has been obsoleted by the new "nfnetlink_queue" code (see
48 CONFIG_NETFILTER_NETLINK_QUEUE).
49
50 To compile it as a module, choose M here. If unsure, say N.
51
52config IP_NF_IPTABLES 39config IP_NF_IPTABLES
53 tristate "IP tables support (required for filtering/masq/NAT)" 40 tristate "IP tables support (required for filtering/masq/NAT)"
54 default m if NETFILTER_ADVANCED=n 41 default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0d9bdacce99f..3bd55bad230a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2059,11 +2059,8 @@ void tcp_enter_loss(struct sock *sk, int how)
2059 if (tcp_is_reno(tp)) 2059 if (tcp_is_reno(tp))
2060 tcp_reset_reno_sack(tp); 2060 tcp_reset_reno_sack(tp);
2061 2061
2062 if (!how) { 2062 tp->undo_marker = tp->snd_una;
2063 /* Push undo marker, if it was plain RTO and nothing 2063 if (how) {
2064 * was retransmitted. */
2065 tp->undo_marker = tp->snd_una;
2066 } else {
2067 tp->sacked_out = 0; 2064 tp->sacked_out = 0;
2068 tp->fackets_out = 0; 2065 tp->fackets_out = 0;
2069 } 2066 }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 817fbb396bc8..b44cf81d8178 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1809,8 +1809,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1809 goto send_now; 1809 goto send_now;
1810 } 1810 }
1811 1811
1812 /* Ok, it looks like it is advisable to defer. */ 1812 /* Ok, it looks like it is advisable to defer.
1813 tp->tso_deferred = 1 | (jiffies << 1); 1813 * Do not rearm the timer if already set to not break TCP ACK clocking.
1814 */
1815 if (!tp->tso_deferred)
1816 tp->tso_deferred = 1 | (jiffies << 1);
1814 1817
1815 return true; 1818 return true;
1816 1819
@@ -2706,6 +2709,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2706 skb_reserve(skb, MAX_TCP_HEADER); 2709 skb_reserve(skb, MAX_TCP_HEADER);
2707 2710
2708 skb_dst_set(skb, dst); 2711 skb_dst_set(skb, dst);
2712 security_skb_owned_by(skb, sk);
2709 2713
2710 mss = dst_metric_advmss(dst); 2714 mss = dst_metric_advmss(dst);
2711 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2715 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
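
tcp_tso_should_defer() now records its deferral timestamp only the first time it decides to defer, so the deadline cannot keep sliding forward on every subsequent check. A hedged sketch of that "arm once, then only compare" rule (the names and the 50ms budget are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static struct timespec deferred_since;  /* zero means "not armed" */

static bool should_keep_deferring(long budget_ms)
{
        struct timespec now;
        clock_gettime(CLOCK_MONOTONIC, &now);

        /* Arm the deadline only the first time we decide to defer. */
        if (deferred_since.tv_sec == 0 && deferred_since.tv_nsec == 0)
                deferred_since = now;

        long elapsed_ms = (now.tv_sec - deferred_since.tv_sec) * 1000 +
                          (now.tv_nsec - deferred_since.tv_nsec) / 1000000;
        return elapsed_ms < budget_ms;
}

int main(void)
{
        int rounds = 0;
        while (should_keep_deferring(50)) {     /* defer for at most ~50ms total */
                rounds++;
                struct timespec nap = { 0, 5 * 1000 * 1000 };   /* 5ms */
                nanosleep(&nap, NULL);
        }
        printf("stopped deferring after %d rounds\n", rounds);
        return 0;
}
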
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 265c42cf963c..0a073a263720 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1762,9 +1762,16 @@ int udp_rcv(struct sk_buff *skb)
1762 1762
1763void udp_destroy_sock(struct sock *sk) 1763void udp_destroy_sock(struct sock *sk)
1764{ 1764{
1765 struct udp_sock *up = udp_sk(sk);
1765 bool slow = lock_sock_fast(sk); 1766 bool slow = lock_sock_fast(sk);
1766 udp_flush_pending_frames(sk); 1767 udp_flush_pending_frames(sk);
1767 unlock_sock_fast(sk, slow); 1768 unlock_sock_fast(sk, slow);
1769 if (static_key_false(&udp_encap_needed) && up->encap_type) {
1770 void (*encap_destroy)(struct sock *sk);
1771 encap_destroy = ACCESS_ONCE(up->encap_destroy);
1772 if (encap_destroy)
1773 encap_destroy(sk);
1774 }
1768} 1775}
1769 1776
1770/* 1777/*
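
udp_destroy_sock() now gives the encapsulation layer an optional encap_destroy() hook at socket teardown, and the IPv6 side receives the same treatment further down. A hedged, minimal sketch of the optional-destructor shape in plain C (invented names, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct tunnel_sock {
        int encap_type;
        void (*encap_destroy)(struct tunnel_sock *sk);
        void *encap_state;
};

static void my_encap_destroy(struct tunnel_sock *sk)
{
        printf("tearing down encapsulation state\n");
        free(sk->encap_state);
        sk->encap_state = NULL;
}

static void sock_destroy(struct tunnel_sock *sk)
{
        /* Pending data would be flushed here, then the encapsulation layer
         * gets a chance to clean up anything it hung off the socket. */
        if (sk->encap_type && sk->encap_destroy)
                sk->encap_destroy(sk);
}

int main(void)
{
        struct tunnel_sock sk = {
                .encap_type = 1,
                .encap_destroy = my_encap_destroy,
                .encap_state = malloc(64),
        };
        sock_destroy(&sk);
        return 0;
}
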
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f2c7e615f902..a459c4f5b769 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2529,6 +2529,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2529static void init_loopback(struct net_device *dev) 2529static void init_loopback(struct net_device *dev)
2530{ 2530{
2531 struct inet6_dev *idev; 2531 struct inet6_dev *idev;
2532 struct net_device *sp_dev;
2533 struct inet6_ifaddr *sp_ifa;
2534 struct rt6_info *sp_rt;
2532 2535
2533 /* ::1 */ 2536 /* ::1 */
2534 2537
@@ -2540,6 +2543,30 @@ static void init_loopback(struct net_device *dev)
2540 } 2543 }
2541 2544
2542 add_addr(idev, &in6addr_loopback, 128, IFA_HOST); 2545 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
2546
2547 /* Add routes to other interface's IPv6 addresses */
2548 for_each_netdev(dev_net(dev), sp_dev) {
2549 if (!strcmp(sp_dev->name, dev->name))
2550 continue;
2551
2552 idev = __in6_dev_get(sp_dev);
2553 if (!idev)
2554 continue;
2555
2556 read_lock_bh(&idev->lock);
2557 list_for_each_entry(sp_ifa, &idev->addr_list, if_list) {
2558
2559 if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
2560 continue;
2561
2562 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
2563
2564 /* Failure cases are ignored */
2565 if (!IS_ERR(sp_rt))
2566 ip6_ins_rt(sp_rt);
2567 }
2568 read_unlock_bh(&idev->lock);
2569 }
2543} 2570}
2544 2571
2545static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) 2572static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
@@ -4784,26 +4811,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
4784 4811
4785static int __net_init addrconf_init_net(struct net *net) 4812static int __net_init addrconf_init_net(struct net *net)
4786{ 4813{
4787 int err; 4814 int err = -ENOMEM;
4788 struct ipv6_devconf *all, *dflt; 4815 struct ipv6_devconf *all, *dflt;
4789 4816
4790 err = -ENOMEM; 4817 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
4791 all = &ipv6_devconf; 4818 if (all == NULL)
4792 dflt = &ipv6_devconf_dflt; 4819 goto err_alloc_all;
4793 4820
4794 if (!net_eq(net, &init_net)) { 4821 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
4795 all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL); 4822 if (dflt == NULL)
4796 if (all == NULL) 4823 goto err_alloc_dflt;
4797 goto err_alloc_all;
4798 4824
4799 dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); 4825 /* these will be inherited by all namespaces */
4800 if (dflt == NULL) 4826 dflt->autoconf = ipv6_defaults.autoconf;
4801 goto err_alloc_dflt; 4827 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
4802 } else {
4803 /* these will be inherited by all namespaces */
4804 dflt->autoconf = ipv6_defaults.autoconf;
4805 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
4806 }
4807 4828
4808 net->ipv6.devconf_all = all; 4829 net->ipv6.devconf_all = all;
4809 net->ipv6.devconf_dflt = dflt; 4830 net->ipv6.devconf_dflt = dflt;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index e33fe0ab2568..2bab2aa59745 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -118,6 +118,18 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
118 ipv6_addr_loopback(&hdr->daddr)) 118 ipv6_addr_loopback(&hdr->daddr))
119 goto err; 119 goto err;
120 120
121 /* RFC4291 Errata ID: 3480
122 * Interface-Local scope spans only a single interface on a
123 * node and is useful only for loopback transmission of
124 * multicast. Packets with interface-local scope received
125 * from another node must be discarded.
126 */
127 if (!(skb->pkt_type == PACKET_LOOPBACK ||
128 dev->flags & IFF_LOOPBACK) &&
129 ipv6_addr_is_multicast(&hdr->daddr) &&
130 IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
131 goto err;
132
121 /* RFC4291 2.7 133 /* RFC4291 2.7
122 * Nodes must not originate a packet to a multicast address whose scope 134 * Nodes must not originate a packet to a multicast address whose scope
123 * field contains the reserved value 0; if such a packet is received, it 135 * field contains the reserved value 0; if such a packet is received, it
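
The new check relies on the IPv6 multicast scope field, which lives in the low four bits of the second address byte; scope 1 is interface-local and must never arrive from another node. A hedged userspace sketch of reading that field (the sample addresses are illustrative):

#include <arpa/inet.h>
#include <stdio.h>

static unsigned int mc_scope(const struct in6_addr *a)
{
        return a->s6_addr[1] & 0x0f;    /* same extraction as IPV6_ADDR_MC_SCOPE() */
}

int main(void)
{
        const char *samples[] = { "ff01::1", "ff02::1", "ff05::2", "2001:db8::1" };

        for (unsigned int i = 0; i < 4; i++) {
                struct in6_addr a;
                if (inet_pton(AF_INET6, samples[i], &a) != 1)
                        continue;
                if (a.s6_addr[0] != 0xff) {
                        printf("%-12s not multicast\n", samples[i]);
                        continue;
                }
                printf("%-12s multicast scope %u%s\n", samples[i], mc_scope(&a),
                       mc_scope(&a) == 1 ? " (interface-local, drop if from the wire)" : "");
        }
        return 0;
}
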
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
index 83acc1405a18..cb631143721c 100644
--- a/net/ipv6/netfilter/ip6t_NPT.c
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
57 if (pfx_len - i >= 32) 57 if (pfx_len - i >= 32)
58 mask = 0; 58 mask = 0;
59 else 59 else
60 mask = htonl(~((1 << (pfx_len - i)) - 1)); 60 mask = htonl((1 << (i - pfx_len + 32)) - 1);
61 61
62 idx = i / 32; 62 idx = i / 32;
63 addr->s6_addr32[idx] &= mask; 63 addr->s6_addr32[idx] &= mask;
@@ -114,6 +114,7 @@ ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
114static struct xt_target ip6t_npt_target_reg[] __read_mostly = { 114static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
115 { 115 {
116 .name = "SNPT", 116 .name = "SNPT",
117 .table = "mangle",
117 .target = ip6t_snpt_tg, 118 .target = ip6t_snpt_tg,
118 .targetsize = sizeof(struct ip6t_npt_tginfo), 119 .targetsize = sizeof(struct ip6t_npt_tginfo),
119 .checkentry = ip6t_npt_checkentry, 120 .checkentry = ip6t_npt_checkentry,
@@ -124,6 +125,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
124 }, 125 },
125 { 126 {
126 .name = "DNPT", 127 .name = "DNPT",
128 .table = "mangle",
127 .target = ip6t_dnpt_tg, 129 .target = ip6t_dnpt_tg,
128 .targetsize = sizeof(struct ip6t_npt_tginfo), 130 .targetsize = sizeof(struct ip6t_npt_tginfo),
129 .checkentry = ip6t_npt_checkentry, 131 .checkentry = ip6t_npt_checkentry,
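
The corrected mask computation builds, for the 32-bit word in which the prefix boundary falls, the set of host bits to preserve: with p prefix bits left in that word the mask is (1 << (32 - p)) - 1, converted to network byte order. A hedged sketch of that word-by-word arithmetic (it mirrors the shape of the fix, not the xtables code itself):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Host-bits mask (network byte order) for 32-bit word 'idx' of an address
 * under a prefix of 'pfx_len' bits. */
static uint32_t host_mask(unsigned int idx, unsigned int pfx_len)
{
        unsigned int i = idx * 32;

        if (pfx_len <= i)
                return htonl(0xffffffff);       /* whole word is host bits */
        if (pfx_len - i >= 32)
                return 0;                       /* whole word is prefix bits */
        return htonl((1u << (32 - (pfx_len - i))) - 1);
}

int main(void)
{
        unsigned int pfx_len = 44;      /* e.g. a /44 NPT prefix */

        for (unsigned int idx = 0; idx < 4; idx++)
                printf("word %u: host mask %08x\n", idx,
                       (unsigned)ntohl(host_mask(idx, pfx_len)));
        return 0;
}

For /44 this prints 00000000, 000fffff, ffffffff, ffffffff: the first word is all prefix, the second keeps its 20 host bits, and the last two are untouched.
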
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f6d629fd6aee..46a5be85be87 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -386,6 +386,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
386 386
387 if (dst) 387 if (dst)
388 dst->ops->redirect(dst, sk, skb); 388 dst->ops->redirect(dst, sk, skb);
389 goto out;
389 } 390 }
390 391
391 if (type == ICMPV6_PKT_TOOBIG) { 392 if (type == ICMPV6_PKT_TOOBIG) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 599e1ba6d1ce..d8e5e852fc7a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1285,10 +1285,18 @@ do_confirm:
1285 1285
1286void udpv6_destroy_sock(struct sock *sk) 1286void udpv6_destroy_sock(struct sock *sk)
1287{ 1287{
1288 struct udp_sock *up = udp_sk(sk);
1288 lock_sock(sk); 1289 lock_sock(sk);
1289 udp_v6_flush_pending_frames(sk); 1290 udp_v6_flush_pending_frames(sk);
1290 release_sock(sk); 1291 release_sock(sk);
1291 1292
1293 if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
1294 void (*encap_destroy)(struct sock *sk);
1295 encap_destroy = ACCESS_ONCE(up->encap_destroy);
1296 if (encap_destroy)
1297 encap_destroy(sk);
1298 }
1299
1292 inet6_destroy_sock(sk); 1300 inet6_destroy_sock(sk);
1293} 1301}
1294 1302
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index d07e3a626446..e493b3397ae3 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
1386 1386
1387 IRDA_DEBUG(4, "%s()\n", __func__); 1387 IRDA_DEBUG(4, "%s()\n", __func__);
1388 1388
1389 msg->msg_namelen = 0;
1390
1389 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, 1391 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
1390 flags & MSG_DONTWAIT, &err); 1392 flags & MSG_DONTWAIT, &err);
1391 if (!skb) 1393 if (!skb)
@@ -2583,8 +2585,10 @@ bed:
2583 NULL, NULL, NULL); 2585 NULL, NULL, NULL);
2584 2586
2585 /* Check if the we got some results */ 2587 /* Check if the we got some results */
2586 if (!self->cachedaddr) 2588 if (!self->cachedaddr) {
2587 return -EAGAIN; /* Didn't find any devices */ 2589 err = -EAGAIN; /* Didn't find any devices */
2590 goto out;
2591 }
2588 daddr = self->cachedaddr; 2592 daddr = self->cachedaddr;
2589 /* Cleanup */ 2593 /* Cleanup */
2590 self->cachedaddr = 0; 2594 self->cachedaddr = 0;
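
The second af_irda hunk routes the "no devices found" case through the function's common exit label instead of returning directly, so whatever cleanup lives at that label still runs. A hedged sketch of the goto-out idiom; the mutex below is purely illustrative and not a claim about what af_irda releases there:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int cached_addr;                 /* illustrative stand-in for a cached result */

static int lookup(int *addr_out)
{
        int err = 0;

        pthread_mutex_lock(&lock);

        if (!cached_addr) {
                err = -EAGAIN;          /* didn't find any devices */
                goto out;               /* still unlocks below */
        }
        *addr_out = cached_addr;
        cached_addr = 0;

out:
        pthread_mutex_unlock(&lock);
        return err;
}

int main(void)
{
        int addr = 0;

        printf("first lookup: %d\n", lookup(&addr));    /* -EAGAIN, lock released */
        cached_addr = 7;
        int err = lookup(&addr);
        printf("second lookup: %d (addr=%d)\n", err, addr);
        return 0;
}
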
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a7d11ffe4284..206ce6db2c36 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] =
49 49
50#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class)) 50#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
51 51
52/* macros to set/get socket control buffer at correct offset */
53#define CB_TAG(skb) ((skb)->cb) /* iucv message tag */
54#define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag))
55#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
56#define CB_TRGCLS_LEN (TRGCLS_SIZE)
57
58#define __iucv_sock_wait(sk, condition, timeo, ret) \ 52#define __iucv_sock_wait(sk, condition, timeo, ret) \
59do { \ 53do { \
60 DEFINE_WAIT(__wait); \ 54 DEFINE_WAIT(__wait); \
@@ -1141,7 +1135,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
1141 1135
1142 /* increment and save iucv message tag for msg_completion cbk */ 1136 /* increment and save iucv message tag for msg_completion cbk */
1143 txmsg.tag = iucv->send_tag++; 1137 txmsg.tag = iucv->send_tag++;
1144 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); 1138 IUCV_SKB_CB(skb)->tag = txmsg.tag;
1145 1139
1146 if (iucv->transport == AF_IUCV_TRANS_HIPER) { 1140 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1147 atomic_inc(&iucv->msg_sent); 1141 atomic_inc(&iucv->msg_sent);
@@ -1224,7 +1218,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
1224 return -ENOMEM; 1218 return -ENOMEM;
1225 1219
1226 /* copy target class to control buffer of new skb */ 1220 /* copy target class to control buffer of new skb */
1227 memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN); 1221 IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
1228 1222
1229 /* copy data fragment */ 1223 /* copy data fragment */
1230 memcpy(nskb->data, skb->data + copied, size); 1224 memcpy(nskb->data, skb->data + copied, size);
@@ -1256,7 +1250,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1256 1250
1257 /* store msg target class in the second 4 bytes of skb ctrl buffer */ 1251 /* store msg target class in the second 4 bytes of skb ctrl buffer */
1258 /* Note: the first 4 bytes are reserved for msg tag */ 1252 /* Note: the first 4 bytes are reserved for msg tag */
1259 memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN); 1253 IUCV_SKB_CB(skb)->class = msg->class;
1260 1254
1261 /* check for special IPRM messages (e.g. iucv_sock_shutdown) */ 1255 /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
1262 if ((msg->flags & IUCV_IPRMDATA) && len > 7) { 1256 if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
@@ -1292,6 +1286,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1292 } 1286 }
1293 } 1287 }
1294 1288
1289 IUCV_SKB_CB(skb)->offset = 0;
1295 if (sock_queue_rcv_skb(sk, skb)) 1290 if (sock_queue_rcv_skb(sk, skb))
1296 skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); 1291 skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
1297} 1292}
@@ -1327,6 +1322,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1327 unsigned int copied, rlen; 1322 unsigned int copied, rlen;
1328 struct sk_buff *skb, *rskb, *cskb; 1323 struct sk_buff *skb, *rskb, *cskb;
1329 int err = 0; 1324 int err = 0;
1325 u32 offset;
1326
1327 msg->msg_namelen = 0;
1330 1328
1331 if ((sk->sk_state == IUCV_DISCONN) && 1329 if ((sk->sk_state == IUCV_DISCONN) &&
1332 skb_queue_empty(&iucv->backlog_skb_q) && 1330 skb_queue_empty(&iucv->backlog_skb_q) &&
@@ -1346,13 +1344,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1346 return err; 1344 return err;
1347 } 1345 }
1348 1346
1349 rlen = skb->len; /* real length of skb */ 1347 offset = IUCV_SKB_CB(skb)->offset;
1348 rlen = skb->len - offset; /* real length of skb */
1350 copied = min_t(unsigned int, rlen, len); 1349 copied = min_t(unsigned int, rlen, len);
1351 if (!rlen) 1350 if (!rlen)
1352 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; 1351 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
1353 1352
1354 cskb = skb; 1353 cskb = skb;
1355 if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { 1354 if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
1356 if (!(flags & MSG_PEEK)) 1355 if (!(flags & MSG_PEEK))
1357 skb_queue_head(&sk->sk_receive_queue, skb); 1356 skb_queue_head(&sk->sk_receive_queue, skb);
1358 return -EFAULT; 1357 return -EFAULT;
@@ -1370,7 +1369,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1370 * get the trgcls from the control buffer of the skb due to 1369 * get the trgcls from the control buffer of the skb due to
1371 * fragmentation of original iucv message. */ 1370 * fragmentation of original iucv message. */
1372 err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, 1371 err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
1373 CB_TRGCLS_LEN, CB_TRGCLS(skb)); 1372 sizeof(IUCV_SKB_CB(skb)->class),
1373 (void *)&IUCV_SKB_CB(skb)->class);
1374 if (err) { 1374 if (err) {
1375 if (!(flags & MSG_PEEK)) 1375 if (!(flags & MSG_PEEK))
1376 skb_queue_head(&sk->sk_receive_queue, skb); 1376 skb_queue_head(&sk->sk_receive_queue, skb);
@@ -1382,9 +1382,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1382 1382
1383 /* SOCK_STREAM: re-queue skb if it contains unreceived data */ 1383 /* SOCK_STREAM: re-queue skb if it contains unreceived data */
1384 if (sk->sk_type == SOCK_STREAM) { 1384 if (sk->sk_type == SOCK_STREAM) {
1385 skb_pull(skb, copied); 1385 if (copied < rlen) {
1386 if (skb->len) { 1386 IUCV_SKB_CB(skb)->offset = offset + copied;
1387 skb_queue_head(&sk->sk_receive_queue, skb);
1388 goto done; 1387 goto done;
1389 } 1388 }
1390 } 1389 }
@@ -1403,6 +1402,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1403 spin_lock_bh(&iucv->message_q.lock); 1402 spin_lock_bh(&iucv->message_q.lock);
1404 rskb = skb_dequeue(&iucv->backlog_skb_q); 1403 rskb = skb_dequeue(&iucv->backlog_skb_q);
1405 while (rskb) { 1404 while (rskb) {
1405 IUCV_SKB_CB(rskb)->offset = 0;
1406 if (sock_queue_rcv_skb(sk, rskb)) { 1406 if (sock_queue_rcv_skb(sk, rskb)) {
1407 skb_queue_head(&iucv->backlog_skb_q, 1407 skb_queue_head(&iucv->backlog_skb_q,
1408 rskb); 1408 rskb);
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
1830 spin_lock_irqsave(&list->lock, flags); 1830 spin_lock_irqsave(&list->lock, flags);
1831 1831
1832 while (list_skb != (struct sk_buff *)list) { 1832 while (list_skb != (struct sk_buff *)list) {
1833 if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) { 1833 if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
1834 this = list_skb; 1834 this = list_skb;
1835 break; 1835 break;
1836 } 1836 }
@@ -2091,6 +2091,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2091 skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); 2091 skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2092 skb_reset_transport_header(skb); 2092 skb_reset_transport_header(skb);
2093 skb_reset_network_header(skb); 2093 skb_reset_network_header(skb);
2094 IUCV_SKB_CB(skb)->offset = 0;
2094 spin_lock(&iucv->message_q.lock); 2095 spin_lock(&iucv->message_q.lock);
2095 if (skb_queue_empty(&iucv->backlog_skb_q)) { 2096 if (skb_queue_empty(&iucv->backlog_skb_q)) {
2096 if (sock_queue_rcv_skb(sk, skb)) { 2097 if (sock_queue_rcv_skb(sk, skb)) {
@@ -2195,8 +2196,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2195 /* fall through and receive zero length data */ 2196 /* fall through and receive zero length data */
2196 case 0: 2197 case 0:
2197 /* plain data frame */ 2198 /* plain data frame */
2198 memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class, 2199 IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
2199 CB_TRGCLS_LEN);
2200 err = afiucv_hs_callback_rx(sk, skb); 2200 err = afiucv_hs_callback_rx(sk, skb);
2201 break; 2201 break;
2202 default: 2202 default:
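
The af_iucv hunks replace ad-hoc offset macros into skb->cb with a typed control-buffer struct accessed through IUCV_SKB_CB(). A hedged sketch of that pattern with an invented packet type; the compile-time size check is the part the typed layout makes cheap:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pkt {
        char cb[48];            /* scratch space owned by the current layer */
        /* ... payload, lengths, etc. ... */
};

struct demo_cb {
        uint32_t tag;           /* message tag */
        uint32_t class;         /* target class */
        uint32_t offset;        /* bytes already delivered to the reader */
};

#define DEMO_CB(p) ((struct demo_cb *)&(p)->cb[0])

_Static_assert(sizeof(struct demo_cb) <= sizeof(((struct pkt *)0)->cb),
               "control block must fit in pkt->cb");

int main(void)
{
        struct pkt p;
        memset(&p, 0, sizeof(p));

        DEMO_CB(&p)->tag = 1234;
        DEMO_CB(&p)->offset = 0;

        printf("tag=%u offset=%u\n",
               (unsigned)DEMO_CB(&p)->tag, (unsigned)DEMO_CB(&p)->offset);
        return 0;
}
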
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 8555f331ea60..5b1e5af25713 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2693,6 +2693,7 @@ static int key_notify_policy_flush(const struct km_event *c)
2693 hdr->sadb_msg_pid = c->portid; 2693 hdr->sadb_msg_pid = c->portid;
2694 hdr->sadb_msg_version = PF_KEY_V2; 2694 hdr->sadb_msg_version = PF_KEY_V2;
2695 hdr->sadb_msg_errno = (uint8_t) 0; 2695 hdr->sadb_msg_errno = (uint8_t) 0;
2696 hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
2696 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); 2697 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
2697 pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); 2698 pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
2698 return 0; 2699 return 0;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index d36875f3427e..8aecf5df6656 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -114,7 +114,6 @@ struct l2tp_net {
114 114
115static void l2tp_session_set_header_len(struct l2tp_session *session, int version); 115static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
116static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); 116static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
117static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
118 117
119static inline struct l2tp_net *l2tp_pernet(struct net *net) 118static inline struct l2tp_net *l2tp_pernet(struct net *net)
120{ 119{
@@ -192,6 +191,7 @@ struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
192 } else { 191 } else {
193 /* Socket is owned by kernelspace */ 192 /* Socket is owned by kernelspace */
194 sk = tunnel->sock; 193 sk = tunnel->sock;
194 sock_hold(sk);
195 } 195 }
196 196
197out: 197out:
@@ -210,6 +210,7 @@ void l2tp_tunnel_sock_put(struct sock *sk)
210 } 210 }
211 sock_put(sk); 211 sock_put(sk);
212 } 212 }
213 sock_put(sk);
213} 214}
214EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); 215EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
215 216
@@ -373,10 +374,8 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
373 struct sk_buff *skbp; 374 struct sk_buff *skbp;
374 struct sk_buff *tmp; 375 struct sk_buff *tmp;
375 u32 ns = L2TP_SKB_CB(skb)->ns; 376 u32 ns = L2TP_SKB_CB(skb)->ns;
376 struct l2tp_stats *sstats;
377 377
378 spin_lock_bh(&session->reorder_q.lock); 378 spin_lock_bh(&session->reorder_q.lock);
379 sstats = &session->stats;
380 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { 379 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
381 if (L2TP_SKB_CB(skbp)->ns > ns) { 380 if (L2TP_SKB_CB(skbp)->ns > ns) {
382 __skb_queue_before(&session->reorder_q, skbp, skb); 381 __skb_queue_before(&session->reorder_q, skbp, skb);
@@ -384,9 +383,7 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
384 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", 383 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
385 session->name, ns, L2TP_SKB_CB(skbp)->ns, 384 session->name, ns, L2TP_SKB_CB(skbp)->ns,
386 skb_queue_len(&session->reorder_q)); 385 skb_queue_len(&session->reorder_q));
387 u64_stats_update_begin(&sstats->syncp); 386 atomic_long_inc(&session->stats.rx_oos_packets);
388 sstats->rx_oos_packets++;
389 u64_stats_update_end(&sstats->syncp);
390 goto out; 387 goto out;
391 } 388 }
392 } 389 }
@@ -403,23 +400,16 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
403{ 400{
404 struct l2tp_tunnel *tunnel = session->tunnel; 401 struct l2tp_tunnel *tunnel = session->tunnel;
405 int length = L2TP_SKB_CB(skb)->length; 402 int length = L2TP_SKB_CB(skb)->length;
406 struct l2tp_stats *tstats, *sstats;
407 403
408 /* We're about to requeue the skb, so return resources 404 /* We're about to requeue the skb, so return resources
409 * to its current owner (a socket receive buffer). 405 * to its current owner (a socket receive buffer).
410 */ 406 */
411 skb_orphan(skb); 407 skb_orphan(skb);
412 408
413 tstats = &tunnel->stats; 409 atomic_long_inc(&tunnel->stats.rx_packets);
414 u64_stats_update_begin(&tstats->syncp); 410 atomic_long_add(length, &tunnel->stats.rx_bytes);
415 sstats = &session->stats; 411 atomic_long_inc(&session->stats.rx_packets);
416 u64_stats_update_begin(&sstats->syncp); 412 atomic_long_add(length, &session->stats.rx_bytes);
417 tstats->rx_packets++;
418 tstats->rx_bytes += length;
419 sstats->rx_packets++;
420 sstats->rx_bytes += length;
421 u64_stats_update_end(&tstats->syncp);
422 u64_stats_update_end(&sstats->syncp);
423 413
424 if (L2TP_SKB_CB(skb)->has_seq) { 414 if (L2TP_SKB_CB(skb)->has_seq) {
425 /* Bump our Nr */ 415 /* Bump our Nr */
@@ -450,7 +440,6 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
450{ 440{
451 struct sk_buff *skb; 441 struct sk_buff *skb;
452 struct sk_buff *tmp; 442 struct sk_buff *tmp;
453 struct l2tp_stats *sstats;
454 443
455 /* If the pkt at the head of the queue has the nr that we 444 /* If the pkt at the head of the queue has the nr that we
456 * expect to send up next, dequeue it and any other 445 * expect to send up next, dequeue it and any other
@@ -458,13 +447,10 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
458 */ 447 */
459start: 448start:
460 spin_lock_bh(&session->reorder_q.lock); 449 spin_lock_bh(&session->reorder_q.lock);
461 sstats = &session->stats;
462 skb_queue_walk_safe(&session->reorder_q, skb, tmp) { 450 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
463 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { 451 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
464 u64_stats_update_begin(&sstats->syncp); 452 atomic_long_inc(&session->stats.rx_seq_discards);
465 sstats->rx_seq_discards++; 453 atomic_long_inc(&session->stats.rx_errors);
466 sstats->rx_errors++;
467 u64_stats_update_end(&sstats->syncp);
468 l2tp_dbg(session, L2TP_MSG_SEQ, 454 l2tp_dbg(session, L2TP_MSG_SEQ,
469 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n", 455 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
470 session->name, L2TP_SKB_CB(skb)->ns, 456 session->name, L2TP_SKB_CB(skb)->ns,
@@ -623,7 +609,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
623 struct l2tp_tunnel *tunnel = session->tunnel; 609 struct l2tp_tunnel *tunnel = session->tunnel;
624 int offset; 610 int offset;
625 u32 ns, nr; 611 u32 ns, nr;
626 struct l2tp_stats *sstats = &session->stats;
627 612
628 /* The ref count is increased since we now hold a pointer to 613 /* The ref count is increased since we now hold a pointer to
629 * the session. Take care to decrement the refcnt when exiting 614 * the session. Take care to decrement the refcnt when exiting
@@ -640,9 +625,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
640 "%s: cookie mismatch (%u/%u). Discarding.\n", 625 "%s: cookie mismatch (%u/%u). Discarding.\n",
641 tunnel->name, tunnel->tunnel_id, 626 tunnel->name, tunnel->tunnel_id,
642 session->session_id); 627 session->session_id);
643 u64_stats_update_begin(&sstats->syncp); 628 atomic_long_inc(&session->stats.rx_cookie_discards);
644 sstats->rx_cookie_discards++;
645 u64_stats_update_end(&sstats->syncp);
646 goto discard; 629 goto discard;
647 } 630 }
648 ptr += session->peer_cookie_len; 631 ptr += session->peer_cookie_len;
@@ -711,9 +694,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
711 l2tp_warn(session, L2TP_MSG_SEQ, 694 l2tp_warn(session, L2TP_MSG_SEQ,
712 "%s: recv data has no seq numbers when required. Discarding.\n", 695 "%s: recv data has no seq numbers when required. Discarding.\n",
713 session->name); 696 session->name);
714 u64_stats_update_begin(&sstats->syncp); 697 atomic_long_inc(&session->stats.rx_seq_discards);
715 sstats->rx_seq_discards++;
716 u64_stats_update_end(&sstats->syncp);
717 goto discard; 698 goto discard;
718 } 699 }
719 700
@@ -732,9 +713,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
732 l2tp_warn(session, L2TP_MSG_SEQ, 713 l2tp_warn(session, L2TP_MSG_SEQ,
733 "%s: recv data has no seq numbers when required. Discarding.\n", 714 "%s: recv data has no seq numbers when required. Discarding.\n",
734 session->name); 715 session->name);
735 u64_stats_update_begin(&sstats->syncp); 716 atomic_long_inc(&session->stats.rx_seq_discards);
736 sstats->rx_seq_discards++;
737 u64_stats_update_end(&sstats->syncp);
738 goto discard; 717 goto discard;
739 } 718 }
740 } 719 }
@@ -788,9 +767,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
788 * packets 767 * packets
789 */ 768 */
790 if (L2TP_SKB_CB(skb)->ns != session->nr) { 769 if (L2TP_SKB_CB(skb)->ns != session->nr) {
791 u64_stats_update_begin(&sstats->syncp); 770 atomic_long_inc(&session->stats.rx_seq_discards);
792 sstats->rx_seq_discards++;
793 u64_stats_update_end(&sstats->syncp);
794 l2tp_dbg(session, L2TP_MSG_SEQ, 771 l2tp_dbg(session, L2TP_MSG_SEQ,
795 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n", 772 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
796 session->name, L2TP_SKB_CB(skb)->ns, 773 session->name, L2TP_SKB_CB(skb)->ns,
@@ -816,9 +793,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
816 return; 793 return;
817 794
818discard: 795discard:
819 u64_stats_update_begin(&sstats->syncp); 796 atomic_long_inc(&session->stats.rx_errors);
820 sstats->rx_errors++;
821 u64_stats_update_end(&sstats->syncp);
822 kfree_skb(skb); 797 kfree_skb(skb);
823 798
824 if (session->deref) 799 if (session->deref)
@@ -828,6 +803,23 @@ discard:
828} 803}
829EXPORT_SYMBOL(l2tp_recv_common); 804EXPORT_SYMBOL(l2tp_recv_common);
830 805
806/* Drop skbs from the session's reorder_q
807 */
808int l2tp_session_queue_purge(struct l2tp_session *session)
809{
810 struct sk_buff *skb = NULL;
811 BUG_ON(!session);
812 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
813 while ((skb = skb_dequeue(&session->reorder_q))) {
814 atomic_long_inc(&session->stats.rx_errors);
815 kfree_skb(skb);
816 if (session->deref)
817 (*session->deref)(session);
818 }
819 return 0;
820}
821EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
822
831/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame 823/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
832 * here. The skb is not on a list when we get here. 824 * here. The skb is not on a list when we get here.
833 * Returns 0 if the packet was a data packet and was successfully passed on. 825 * Returns 0 if the packet was a data packet and was successfully passed on.
@@ -843,7 +835,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
843 u32 tunnel_id, session_id; 835 u32 tunnel_id, session_id;
844 u16 version; 836 u16 version;
845 int length; 837 int length;
846 struct l2tp_stats *tstats;
847 838
848 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) 839 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
849 goto discard_bad_csum; 840 goto discard_bad_csum;
@@ -932,10 +923,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
932discard_bad_csum: 923discard_bad_csum:
933 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); 924 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
934 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); 925 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
935 tstats = &tunnel->stats; 926 atomic_long_inc(&tunnel->stats.rx_errors);
936 u64_stats_update_begin(&tstats->syncp);
937 tstats->rx_errors++;
938 u64_stats_update_end(&tstats->syncp);
939 kfree_skb(skb); 927 kfree_skb(skb);
940 928
941 return 0; 929 return 0;
@@ -1062,7 +1050,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1062 struct l2tp_tunnel *tunnel = session->tunnel; 1050 struct l2tp_tunnel *tunnel = session->tunnel;
1063 unsigned int len = skb->len; 1051 unsigned int len = skb->len;
1064 int error; 1052 int error;
1065 struct l2tp_stats *tstats, *sstats;
1066 1053
1067 /* Debug */ 1054 /* Debug */
1068 if (session->send_seq) 1055 if (session->send_seq)
@@ -1091,21 +1078,15 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1091 error = ip_queue_xmit(skb, fl); 1078 error = ip_queue_xmit(skb, fl);
1092 1079
1093 /* Update stats */ 1080 /* Update stats */
1094 tstats = &tunnel->stats;
1095 u64_stats_update_begin(&tstats->syncp);
1096 sstats = &session->stats;
1097 u64_stats_update_begin(&sstats->syncp);
1098 if (error >= 0) { 1081 if (error >= 0) {
1099 tstats->tx_packets++; 1082 atomic_long_inc(&tunnel->stats.tx_packets);
1100 tstats->tx_bytes += len; 1083 atomic_long_add(len, &tunnel->stats.tx_bytes);
1101 sstats->tx_packets++; 1084 atomic_long_inc(&session->stats.tx_packets);
1102 sstats->tx_bytes += len; 1085 atomic_long_add(len, &session->stats.tx_bytes);
1103 } else { 1086 } else {
1104 tstats->tx_errors++; 1087 atomic_long_inc(&tunnel->stats.tx_errors);
1105 sstats->tx_errors++; 1088 atomic_long_inc(&session->stats.tx_errors);
1106 } 1089 }
1107 u64_stats_update_end(&tstats->syncp);
1108 u64_stats_update_end(&sstats->syncp);
1109 1090
1110 return 0; 1091 return 0;
1111} 1092}
@@ -1282,6 +1263,7 @@ static void l2tp_tunnel_destruct(struct sock *sk)
1282 /* No longer an encapsulation socket. See net/ipv4/udp.c */ 1263 /* No longer an encapsulation socket. See net/ipv4/udp.c */
1283 (udp_sk(sk))->encap_type = 0; 1264 (udp_sk(sk))->encap_type = 0;
1284 (udp_sk(sk))->encap_rcv = NULL; 1265 (udp_sk(sk))->encap_rcv = NULL;
1266 (udp_sk(sk))->encap_destroy = NULL;
1285 break; 1267 break;
1286 case L2TP_ENCAPTYPE_IP: 1268 case L2TP_ENCAPTYPE_IP:
1287 break; 1269 break;
@@ -1311,7 +1293,7 @@ end:
1311 1293
1312/* When the tunnel is closed, all the attached sessions need to go too. 1294/* When the tunnel is closed, all the attached sessions need to go too.
1313 */ 1295 */
1314static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) 1296void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1315{ 1297{
1316 int hash; 1298 int hash;
1317 struct hlist_node *walk; 1299 struct hlist_node *walk;
@@ -1334,25 +1316,13 @@ again:
1334 1316
1335 hlist_del_init(&session->hlist); 1317 hlist_del_init(&session->hlist);
1336 1318
1337 /* Since we should hold the sock lock while
1338 * doing any unbinding, we need to release the
1339 * lock we're holding before taking that lock.
1340 * Hold a reference to the sock so it doesn't
1341 * disappear as we're jumping between locks.
1342 */
1343 if (session->ref != NULL) 1319 if (session->ref != NULL)
1344 (*session->ref)(session); 1320 (*session->ref)(session);
1345 1321
1346 write_unlock_bh(&tunnel->hlist_lock); 1322 write_unlock_bh(&tunnel->hlist_lock);
1347 1323
1348 if (tunnel->version != L2TP_HDR_VER_2) { 1324 __l2tp_session_unhash(session);
1349 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); 1325 l2tp_session_queue_purge(session);
1350
1351 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1352 hlist_del_init_rcu(&session->global_hlist);
1353 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1354 synchronize_rcu();
1355 }
1356 1326
1357 if (session->session_close != NULL) 1327 if (session->session_close != NULL)
1358 (*session->session_close)(session); 1328 (*session->session_close)(session);
@@ -1360,6 +1330,8 @@ again:
1360 if (session->deref != NULL) 1330 if (session->deref != NULL)
1361 (*session->deref)(session); 1331 (*session->deref)(session);
1362 1332
1333 l2tp_session_dec_refcount(session);
1334
1363 write_lock_bh(&tunnel->hlist_lock); 1335 write_lock_bh(&tunnel->hlist_lock);
1364 1336
1365 /* Now restart from the beginning of this hash 1337 /* Now restart from the beginning of this hash
@@ -1372,6 +1344,17 @@ again:
1372 } 1344 }
1373 write_unlock_bh(&tunnel->hlist_lock); 1345 write_unlock_bh(&tunnel->hlist_lock);
1374} 1346}
1347EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
1348
1349/* Tunnel socket destroy hook for UDP encapsulation */
1350static void l2tp_udp_encap_destroy(struct sock *sk)
1351{
1352 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
1353 if (tunnel) {
1354 l2tp_tunnel_closeall(tunnel);
1355 sock_put(sk);
1356 }
1357}
1375 1358
1376/* Really kill the tunnel. 1359/* Really kill the tunnel.
1377 * Come here only when all sessions have been cleared from the tunnel. 1360 * Come here only when all sessions have been cleared from the tunnel.
@@ -1397,19 +1380,21 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
1397 return; 1380 return;
1398 1381
1399 sock = sk->sk_socket; 1382 sock = sk->sk_socket;
1400 BUG_ON(!sock);
1401 1383
1402 /* If the tunnel socket was created directly by the kernel, use the 1384 /* If the tunnel socket was created by userspace, then go through the
1403 * sk_* API to release the socket now. Otherwise go through the 1385 * inet layer to shut the socket down, and let userspace close it.
1404 * inet_* layer to shut the socket down, and let userspace close it. 1386 * Otherwise, if we created the socket directly within the kernel, use
1387 * the sk API to release it here.
1405 * In either case the tunnel resources are freed in the socket 1388 * In either case the tunnel resources are freed in the socket
1406 * destructor when the tunnel socket goes away. 1389 * destructor when the tunnel socket goes away.
1407 */ 1390 */
1408 if (sock->file == NULL) { 1391 if (tunnel->fd >= 0) {
1409 kernel_sock_shutdown(sock, SHUT_RDWR); 1392 if (sock)
1410 sk_release_kernel(sk); 1393 inet_shutdown(sock, 2);
1411 } else { 1394 } else {
1412 inet_shutdown(sock, 2); 1395 if (sock)
1396 kernel_sock_shutdown(sock, SHUT_RDWR);
1397 sk_release_kernel(sk);
1413 } 1398 }
1414 1399
1415 l2tp_tunnel_sock_put(sk); 1400 l2tp_tunnel_sock_put(sk);
@@ -1668,6 +1653,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1668 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ 1653 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1669 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; 1654 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
1670 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; 1655 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
1656 udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
1671#if IS_ENABLED(CONFIG_IPV6) 1657#if IS_ENABLED(CONFIG_IPV6)
1672 if (sk->sk_family == PF_INET6) 1658 if (sk->sk_family == PF_INET6)
1673 udpv6_encap_enable(); 1659 udpv6_encap_enable();
@@ -1723,6 +1709,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1723 */ 1709 */
1724int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) 1710int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1725{ 1711{
1712 l2tp_tunnel_closeall(tunnel);
1726 return (false == queue_work(l2tp_wq, &tunnel->del_work)); 1713 return (false == queue_work(l2tp_wq, &tunnel->del_work));
1727} 1714}
1728EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); 1715EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
@@ -1731,62 +1718,71 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1731 */ 1718 */
1732void l2tp_session_free(struct l2tp_session *session) 1719void l2tp_session_free(struct l2tp_session *session)
1733{ 1720{
1734 struct l2tp_tunnel *tunnel; 1721 struct l2tp_tunnel *tunnel = session->tunnel;
1735 1722
1736 BUG_ON(atomic_read(&session->ref_count) != 0); 1723 BUG_ON(atomic_read(&session->ref_count) != 0);
1737 1724
1738 tunnel = session->tunnel; 1725 if (tunnel) {
1739 if (tunnel != NULL) {
1740 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); 1726 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1727 if (session->session_id != 0)
1728 atomic_dec(&l2tp_session_count);
1729 sock_put(tunnel->sock);
1730 session->tunnel = NULL;
1731 l2tp_tunnel_dec_refcount(tunnel);
1732 }
1733
1734 kfree(session);
1741 1735
1742 /* Delete the session from the hash */ 1736 return;
1737}
1738EXPORT_SYMBOL_GPL(l2tp_session_free);
1739
1740/* Remove an l2tp session from l2tp_core's hash lists.
1741 * Provides a tidyup interface for pseudowire code which can't just route all
1742 * shutdown via l2tp_session_delete and a pseudowire-specific session_close
1743 * callback.
1744 */
1745void __l2tp_session_unhash(struct l2tp_session *session)
1746{
1747 struct l2tp_tunnel *tunnel = session->tunnel;
1748
1749 /* Remove the session from core hashes */
1750 if (tunnel) {
1751 /* Remove from the per-tunnel hash */
1743 write_lock_bh(&tunnel->hlist_lock); 1752 write_lock_bh(&tunnel->hlist_lock);
1744 hlist_del_init(&session->hlist); 1753 hlist_del_init(&session->hlist);
1745 write_unlock_bh(&tunnel->hlist_lock); 1754 write_unlock_bh(&tunnel->hlist_lock);
1746 1755
1747 /* Unlink from the global hash if not L2TPv2 */ 1756 /* For L2TPv3 we have a per-net hash: remove from there, too */
1748 if (tunnel->version != L2TP_HDR_VER_2) { 1757 if (tunnel->version != L2TP_HDR_VER_2) {
1749 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); 1758 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1750
1751 spin_lock_bh(&pn->l2tp_session_hlist_lock); 1759 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1752 hlist_del_init_rcu(&session->global_hlist); 1760 hlist_del_init_rcu(&session->global_hlist);
1753 spin_unlock_bh(&pn->l2tp_session_hlist_lock); 1761 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1754 synchronize_rcu(); 1762 synchronize_rcu();
1755 } 1763 }
1756
1757 if (session->session_id != 0)
1758 atomic_dec(&l2tp_session_count);
1759
1760 sock_put(tunnel->sock);
1761
1762 /* This will delete the tunnel context if this
1763 * is the last session on the tunnel.
1764 */
1765 session->tunnel = NULL;
1766 l2tp_tunnel_dec_refcount(tunnel);
1767 } 1764 }
1768
1769 kfree(session);
1770
1771 return;
1772} 1765}
1773EXPORT_SYMBOL_GPL(l2tp_session_free); 1766EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
1774 1767
1775/* This function is used by the netlink SESSION_DELETE command and by 1768/* This function is used by the netlink SESSION_DELETE command and by
1776 pseudowire modules. 1769 pseudowire modules.
1777 */ 1770 */
1778int l2tp_session_delete(struct l2tp_session *session) 1771int l2tp_session_delete(struct l2tp_session *session)
1779{ 1772{
1773 if (session->ref)
1774 (*session->ref)(session);
1775 __l2tp_session_unhash(session);
1776 l2tp_session_queue_purge(session);
1780 if (session->session_close != NULL) 1777 if (session->session_close != NULL)
1781 (*session->session_close)(session); 1778 (*session->session_close)(session);
1782 1779 if (session->deref)
 1780 (*session->deref)(session);
1783 l2tp_session_dec_refcount(session); 1781 l2tp_session_dec_refcount(session);
1784
1785 return 0; 1782 return 0;
1786} 1783}
1787EXPORT_SYMBOL_GPL(l2tp_session_delete); 1784EXPORT_SYMBOL_GPL(l2tp_session_delete);
1788 1785
1789
1790/* We come here whenever a session's send_seq, cookie_len or 1786/* We come here whenever a session's send_seq, cookie_len or
1791 * l2specific_len parameters are set. 1787 * l2specific_len parameters are set.
1792 */ 1788 */
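
The l2tp_core.c changes above funnel session teardown through one ordered path: take a temporary reference, unhash the session from the tunnel and per-net lists, purge its reorder queue, run the pseudowire close callback, then drop the reference. The sketch below is a plain userspace C analogue of that ordering; session_unhash(), session_queue_purge() and friends are hypothetical stand-ins, not the kernel API.

/* Illustrative userspace analogue of the teardown ordering used above:
 * hold a ref, unpublish from lookup structures, drain queues, run the
 * close callback, then drop the ref.  Names are invented for the sketch. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct session {
    atomic_int refcount;
    int published;          /* stands in for the hash-list linkage */
    int queued;             /* stands in for the reorder queue depth */
    void (*close)(struct session *);
};

static void session_get(struct session *s) { atomic_fetch_add(&s->refcount, 1); }

static void session_put(struct session *s)
{
    if (atomic_fetch_sub(&s->refcount, 1) == 1) {
        printf("freeing session\n");
        free(s);
    }
}

static void session_unhash(struct session *s) { s->published = 0; }

static void session_queue_purge(struct session *s)
{
    printf("dropping %d queued packets\n", s->queued);
    s->queued = 0;
}

static void session_delete(struct session *s)
{
    session_get(s);            /* keep it alive across the teardown */
    session_unhash(s);         /* no new lookups can find it */
    session_queue_purge(s);    /* nothing left to deliver */
    if (s->close)
        s->close(s);           /* pseudowire-specific cleanup */
    session_put(s);            /* may be the final reference */
}

static void example_close(struct session *s) { (void)s; printf("close callback\n"); }

int main(void)
{
    struct session *s = calloc(1, sizeof(*s));
    if (!s)
        return 1;
    atomic_init(&s->refcount, 1);   /* creator's reference */
    s->published = 1;
    s->queued = 3;
    s->close = example_close;
    session_delete(s);              /* tears down; our reference keeps it valid */
    session_put(s);                 /* final put frees the session */
    return 0;
}

Taking the extra reference first matters because the final put may free the object, so nothing after it is allowed to touch the session.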
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 8eb8f1d47f3a..485a490fd990 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -36,16 +36,15 @@ enum {
36struct sk_buff; 36struct sk_buff;
37 37
38struct l2tp_stats { 38struct l2tp_stats {
39 u64 tx_packets; 39 atomic_long_t tx_packets;
40 u64 tx_bytes; 40 atomic_long_t tx_bytes;
41 u64 tx_errors; 41 atomic_long_t tx_errors;
42 u64 rx_packets; 42 atomic_long_t rx_packets;
43 u64 rx_bytes; 43 atomic_long_t rx_bytes;
44 u64 rx_seq_discards; 44 atomic_long_t rx_seq_discards;
45 u64 rx_oos_packets; 45 atomic_long_t rx_oos_packets;
46 u64 rx_errors; 46 atomic_long_t rx_errors;
47 u64 rx_cookie_discards; 47 atomic_long_t rx_cookie_discards;
48 struct u64_stats_sync syncp;
49}; 48};
50 49
51struct l2tp_tunnel; 50struct l2tp_tunnel;
@@ -240,11 +239,14 @@ extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
240extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); 239extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
241 240
242extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp); 241extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
242extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
243extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); 243extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
244extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); 244extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
245extern void __l2tp_session_unhash(struct l2tp_session *session);
245extern int l2tp_session_delete(struct l2tp_session *session); 246extern int l2tp_session_delete(struct l2tp_session *session);
246extern void l2tp_session_free(struct l2tp_session *session); 247extern void l2tp_session_free(struct l2tp_session *session);
247extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); 248extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
249extern int l2tp_session_queue_purge(struct l2tp_session *session);
248extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); 250extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
249 251
250extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); 252extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
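
The header change replaces u64 counters protected by a u64_stats_sync sequence counter with atomic_long_t fields, so any context can bump a counter with a single atomic add and readers simply load the value; the trade-off is that on 32-bit hosts the counters are now only 32 bits wide. A minimal userspace C11 sketch of the same pattern follows, with invented struct and function names standing in for atomic_long_t and the kernel's atomic_long_inc()/atomic_long_add()/atomic_long_read().

/* Sketch of lock-free statistics counters in the style of the
 * atomic_long_t conversion above; C11 atomics stand in for atomic_long_t. */
#include <stdatomic.h>
#include <stdio.h>

struct tunnel_stats {
    atomic_long tx_packets;
    atomic_long tx_bytes;
    atomic_long rx_errors;
};

static struct tunnel_stats st;  /* zero-initialised, like a fresh tunnel */

static void count_tx(long bytes)
{
    /* a pure counter needs no ordering guarantees, relaxed is enough */
    atomic_fetch_add_explicit(&st.tx_packets, 1, memory_order_relaxed);
    atomic_fetch_add_explicit(&st.tx_bytes, bytes, memory_order_relaxed);
}

int main(void)
{
    count_tx(1400);
    count_tx(60);
    printf("tx %ld packets / %ld bytes, rx errors %ld\n",
           atomic_load_explicit(&st.tx_packets, memory_order_relaxed),
           atomic_load_explicit(&st.tx_bytes, memory_order_relaxed),
           atomic_load_explicit(&st.rx_errors, memory_order_relaxed));
    return 0;
}

Any C11 compiler builds this (for example cc -std=c11 stats.c); the kernel code performs the equivalent operations with atomic_long_inc(), atomic_long_add() and atomic_long_read().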
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index c3813bc84552..072d7202e182 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -146,14 +146,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
146 tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0, 146 tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0,
147 atomic_read(&tunnel->ref_count)); 147 atomic_read(&tunnel->ref_count));
148 148
 149 seq_printf(m, " %08x tx %llu/%llu/%llu rx %llu/%llu/%llu\n", 149 seq_printf(m, " %08x tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
150 tunnel->debug, 150 tunnel->debug,
151 (unsigned long long)tunnel->stats.tx_packets, 151 atomic_long_read(&tunnel->stats.tx_packets),
152 (unsigned long long)tunnel->stats.tx_bytes, 152 atomic_long_read(&tunnel->stats.tx_bytes),
153 (unsigned long long)tunnel->stats.tx_errors, 153 atomic_long_read(&tunnel->stats.tx_errors),
154 (unsigned long long)tunnel->stats.rx_packets, 154 atomic_long_read(&tunnel->stats.rx_packets),
155 (unsigned long long)tunnel->stats.rx_bytes, 155 atomic_long_read(&tunnel->stats.rx_bytes),
156 (unsigned long long)tunnel->stats.rx_errors); 156 atomic_long_read(&tunnel->stats.rx_errors));
157 157
158 if (tunnel->show != NULL) 158 if (tunnel->show != NULL)
159 tunnel->show(m, tunnel); 159 tunnel->show(m, tunnel);
@@ -203,14 +203,14 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
203 seq_printf(m, "\n"); 203 seq_printf(m, "\n");
204 } 204 }
205 205
206 seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n", 206 seq_printf(m, " %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
207 session->nr, session->ns, 207 session->nr, session->ns,
208 (unsigned long long)session->stats.tx_packets, 208 atomic_long_read(&session->stats.tx_packets),
209 (unsigned long long)session->stats.tx_bytes, 209 atomic_long_read(&session->stats.tx_bytes),
210 (unsigned long long)session->stats.tx_errors, 210 atomic_long_read(&session->stats.tx_errors),
211 (unsigned long long)session->stats.rx_packets, 211 atomic_long_read(&session->stats.rx_packets),
212 (unsigned long long)session->stats.rx_bytes, 212 atomic_long_read(&session->stats.rx_bytes),
213 (unsigned long long)session->stats.rx_errors); 213 atomic_long_read(&session->stats.rx_errors));
214 214
215 if (session->show != NULL) 215 if (session->show != NULL)
216 session->show(m, session); 216 session->show(m, session);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 7f41b7051269..571db8dd2292 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -228,10 +228,16 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
228static void l2tp_ip_destroy_sock(struct sock *sk) 228static void l2tp_ip_destroy_sock(struct sock *sk)
229{ 229{
230 struct sk_buff *skb; 230 struct sk_buff *skb;
231 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
231 232
232 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) 233 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
233 kfree_skb(skb); 234 kfree_skb(skb);
234 235
236 if (tunnel) {
237 l2tp_tunnel_closeall(tunnel);
238 sock_put(sk);
239 }
240
235 sk_refcnt_debug_dec(sk); 241 sk_refcnt_debug_dec(sk);
236} 242}
237 243
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 41f2f8126ebc..b8a6039314e8 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -241,10 +241,17 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)
241 241
242static void l2tp_ip6_destroy_sock(struct sock *sk) 242static void l2tp_ip6_destroy_sock(struct sock *sk)
243{ 243{
244 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
245
244 lock_sock(sk); 246 lock_sock(sk);
245 ip6_flush_pending_frames(sk); 247 ip6_flush_pending_frames(sk);
246 release_sock(sk); 248 release_sock(sk);
247 249
250 if (tunnel) {
251 l2tp_tunnel_closeall(tunnel);
252 sock_put(sk);
253 }
254
248 inet6_destroy_sock(sk); 255 inet6_destroy_sock(sk);
249} 256}
250 257
@@ -683,6 +690,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
683 lsa->l2tp_addr = ipv6_hdr(skb)->saddr; 690 lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
684 lsa->l2tp_flowinfo = 0; 691 lsa->l2tp_flowinfo = 0;
685 lsa->l2tp_scope_id = 0; 692 lsa->l2tp_scope_id = 0;
693 lsa->l2tp_conn_id = 0;
686 if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) 694 if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
687 lsa->l2tp_scope_id = IP6CB(skb)->iif; 695 lsa->l2tp_scope_id = IP6CB(skb)->iif;
688 } 696 }
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index c1bab22db85e..0825ff26e113 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -246,8 +246,6 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
246#if IS_ENABLED(CONFIG_IPV6) 246#if IS_ENABLED(CONFIG_IPV6)
247 struct ipv6_pinfo *np = NULL; 247 struct ipv6_pinfo *np = NULL;
248#endif 248#endif
249 struct l2tp_stats stats;
250 unsigned int start;
251 249
252 hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, 250 hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags,
253 L2TP_CMD_TUNNEL_GET); 251 L2TP_CMD_TUNNEL_GET);
@@ -265,28 +263,22 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
265 if (nest == NULL) 263 if (nest == NULL)
266 goto nla_put_failure; 264 goto nla_put_failure;
267 265
268 do { 266 if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
269 start = u64_stats_fetch_begin(&tunnel->stats.syncp); 267 atomic_long_read(&tunnel->stats.tx_packets)) ||
270 stats.tx_packets = tunnel->stats.tx_packets; 268 nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
271 stats.tx_bytes = tunnel->stats.tx_bytes; 269 atomic_long_read(&tunnel->stats.tx_bytes)) ||
272 stats.tx_errors = tunnel->stats.tx_errors; 270 nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
273 stats.rx_packets = tunnel->stats.rx_packets; 271 atomic_long_read(&tunnel->stats.tx_errors)) ||
274 stats.rx_bytes = tunnel->stats.rx_bytes; 272 nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
275 stats.rx_errors = tunnel->stats.rx_errors; 273 atomic_long_read(&tunnel->stats.rx_packets)) ||
276 stats.rx_seq_discards = tunnel->stats.rx_seq_discards; 274 nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
277 stats.rx_oos_packets = tunnel->stats.rx_oos_packets; 275 atomic_long_read(&tunnel->stats.rx_bytes)) ||
278 } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start));
279
280 if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
281 nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
282 nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
283 nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
284 nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
285 nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, 276 nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
286 stats.rx_seq_discards) || 277 atomic_long_read(&tunnel->stats.rx_seq_discards)) ||
287 nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, 278 nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
288 stats.rx_oos_packets) || 279 atomic_long_read(&tunnel->stats.rx_oos_packets)) ||
289 nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) 280 nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
281 atomic_long_read(&tunnel->stats.rx_errors)))
290 goto nla_put_failure; 282 goto nla_put_failure;
291 nla_nest_end(skb, nest); 283 nla_nest_end(skb, nest);
292 284
@@ -612,8 +604,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
612 struct nlattr *nest; 604 struct nlattr *nest;
613 struct l2tp_tunnel *tunnel = session->tunnel; 605 struct l2tp_tunnel *tunnel = session->tunnel;
614 struct sock *sk = NULL; 606 struct sock *sk = NULL;
615 struct l2tp_stats stats;
616 unsigned int start;
617 607
618 sk = tunnel->sock; 608 sk = tunnel->sock;
619 609
@@ -656,28 +646,22 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
656 if (nest == NULL) 646 if (nest == NULL)
657 goto nla_put_failure; 647 goto nla_put_failure;
658 648
659 do { 649 if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
660 start = u64_stats_fetch_begin(&session->stats.syncp); 650 atomic_long_read(&session->stats.tx_packets)) ||
661 stats.tx_packets = session->stats.tx_packets; 651 nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
662 stats.tx_bytes = session->stats.tx_bytes; 652 atomic_long_read(&session->stats.tx_bytes)) ||
663 stats.tx_errors = session->stats.tx_errors; 653 nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
664 stats.rx_packets = session->stats.rx_packets; 654 atomic_long_read(&session->stats.tx_errors)) ||
665 stats.rx_bytes = session->stats.rx_bytes; 655 nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
666 stats.rx_errors = session->stats.rx_errors; 656 atomic_long_read(&session->stats.rx_packets)) ||
667 stats.rx_seq_discards = session->stats.rx_seq_discards; 657 nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
668 stats.rx_oos_packets = session->stats.rx_oos_packets; 658 atomic_long_read(&session->stats.rx_bytes)) ||
669 } while (u64_stats_fetch_retry(&session->stats.syncp, start));
670
671 if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
672 nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
673 nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
674 nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
675 nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
676 nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, 659 nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
677 stats.rx_seq_discards) || 660 atomic_long_read(&session->stats.rx_seq_discards)) ||
678 nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, 661 nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
679 stats.rx_oos_packets) || 662 atomic_long_read(&session->stats.rx_oos_packets)) ||
680 nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) 663 nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
664 atomic_long_read(&session->stats.rx_errors)))
681 goto nla_put_failure; 665 goto nla_put_failure;
682 nla_nest_end(skb, nest); 666 nla_nest_end(skb, nest);
683 667
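
With the sequence-counter snapshot gone, the netlink dump code above reads each counter directly inside one chained expression and bails to a single failure label if any put runs out of room. Below is a small userspace sketch of that chain-with-short-circuit idiom; emit_u64() and the crude buffer layout are stand-ins, not nla_put_u64() or real netlink framing.

/* Sketch of the "chain every put with ||, bail to one failure label"
 * pattern used in the netlink dump code above. */
#include <stdio.h>
#include <string.h>

struct msg {
    unsigned char buf[64];
    size_t len;
};

/* returns non-zero on failure, like nla_put_u64() when the skb is full */
static int emit_u64(struct msg *m, int attr, unsigned long long v)
{
    if (m->len + 12 > sizeof(m->buf))
        return -1;
    m->buf[m->len++] = (unsigned char)attr;
    memcpy(&m->buf[m->len], &v, sizeof(v));
    m->len += sizeof(v) + 3;    /* crude fixed padding, 12 bytes per attribute */
    return 0;
}

static int fill_stats(struct msg *m, const unsigned long long s[3])
{
    if (emit_u64(m, 1, s[0]) ||     /* tx packets */
        emit_u64(m, 2, s[1]) ||     /* tx bytes   */
        emit_u64(m, 3, s[2]))       /* tx errors  */
        goto put_failure;
    return 0;

put_failure:
    m->len = 0;                     /* discard the partial message */
    return -1;
}

int main(void)
{
    struct msg m = { .len = 0 };
    unsigned long long stats[3] = { 10, 12800, 0 };
    printf("fill_stats: %d, len %zu\n", fill_stats(&m, stats), m.len);
    return 0;
}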
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 6a53371dba1f..637a341c1e2d 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -97,6 +97,7 @@
97#include <net/ip.h> 97#include <net/ip.h>
98#include <net/udp.h> 98#include <net/udp.h>
99#include <net/xfrm.h> 99#include <net/xfrm.h>
100#include <net/inet_common.h>
100 101
101#include <asm/byteorder.h> 102#include <asm/byteorder.h>
102#include <linux/atomic.h> 103#include <linux/atomic.h>
@@ -259,7 +260,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
259 session->name); 260 session->name);
260 261
261 /* Not bound. Nothing we can do, so discard. */ 262 /* Not bound. Nothing we can do, so discard. */
262 session->stats.rx_errors++; 263 atomic_long_inc(&session->stats.rx_errors);
263 kfree_skb(skb); 264 kfree_skb(skb);
264 } 265 }
265 266
@@ -447,34 +448,16 @@ static void pppol2tp_session_close(struct l2tp_session *session)
447{ 448{
448 struct pppol2tp_session *ps = l2tp_session_priv(session); 449 struct pppol2tp_session *ps = l2tp_session_priv(session);
449 struct sock *sk = ps->sock; 450 struct sock *sk = ps->sock;
450 struct sk_buff *skb; 451 struct socket *sock = sk->sk_socket;
451 452
452 BUG_ON(session->magic != L2TP_SESSION_MAGIC); 453 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
453 454
454 if (session->session_id == 0)
455 goto out;
456
457 if (sk != NULL) {
458 lock_sock(sk);
459
460 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
461 pppox_unbind_sock(sk);
462 sk->sk_state = PPPOX_DEAD;
463 sk->sk_state_change(sk);
464 }
465
466 /* Purge any queued data */
467 skb_queue_purge(&sk->sk_receive_queue);
468 skb_queue_purge(&sk->sk_write_queue);
469 while ((skb = skb_dequeue(&session->reorder_q))) {
470 kfree_skb(skb);
471 sock_put(sk);
472 }
473 455
474 release_sock(sk); 456 if (sock) {
457 inet_shutdown(sock, 2);
458 /* Don't let the session go away before our socket does */
459 l2tp_session_inc_refcount(session);
475 } 460 }
476
477out:
478 return; 461 return;
479} 462}
480 463
@@ -483,19 +466,12 @@ out:
483 */ 466 */
484static void pppol2tp_session_destruct(struct sock *sk) 467static void pppol2tp_session_destruct(struct sock *sk)
485{ 468{
486 struct l2tp_session *session; 469 struct l2tp_session *session = sk->sk_user_data;
487 470 if (session) {
488 if (sk->sk_user_data != NULL) {
489 session = sk->sk_user_data;
490 if (session == NULL)
491 goto out;
492
493 sk->sk_user_data = NULL; 471 sk->sk_user_data = NULL;
494 BUG_ON(session->magic != L2TP_SESSION_MAGIC); 472 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
495 l2tp_session_dec_refcount(session); 473 l2tp_session_dec_refcount(session);
496 } 474 }
497
498out:
499 return; 475 return;
500} 476}
501 477
@@ -525,16 +501,13 @@ static int pppol2tp_release(struct socket *sock)
525 session = pppol2tp_sock_to_session(sk); 501 session = pppol2tp_sock_to_session(sk);
526 502
527 /* Purge any queued data */ 503 /* Purge any queued data */
528 skb_queue_purge(&sk->sk_receive_queue);
529 skb_queue_purge(&sk->sk_write_queue);
530 if (session != NULL) { 504 if (session != NULL) {
531 struct sk_buff *skb; 505 __l2tp_session_unhash(session);
532 while ((skb = skb_dequeue(&session->reorder_q))) { 506 l2tp_session_queue_purge(session);
533 kfree_skb(skb);
534 sock_put(sk);
535 }
536 sock_put(sk); 507 sock_put(sk);
537 } 508 }
509 skb_queue_purge(&sk->sk_receive_queue);
510 skb_queue_purge(&sk->sk_write_queue);
538 511
539 release_sock(sk); 512 release_sock(sk);
540 513
@@ -880,18 +853,6 @@ out:
880 return error; 853 return error;
881} 854}
882 855
883/* Called when deleting sessions via the netlink interface.
884 */
885static int pppol2tp_session_delete(struct l2tp_session *session)
886{
887 struct pppol2tp_session *ps = l2tp_session_priv(session);
888
889 if (ps->sock == NULL)
890 l2tp_session_dec_refcount(session);
891
892 return 0;
893}
894
895#endif /* CONFIG_L2TP_V3 */ 856#endif /* CONFIG_L2TP_V3 */
896 857
897/* getname() support. 858/* getname() support.
@@ -1025,14 +986,14 @@ end:
1025static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, 986static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,
1026 struct l2tp_stats *stats) 987 struct l2tp_stats *stats)
1027{ 988{
1028 dest->tx_packets = stats->tx_packets; 989 dest->tx_packets = atomic_long_read(&stats->tx_packets);
1029 dest->tx_bytes = stats->tx_bytes; 990 dest->tx_bytes = atomic_long_read(&stats->tx_bytes);
1030 dest->tx_errors = stats->tx_errors; 991 dest->tx_errors = atomic_long_read(&stats->tx_errors);
1031 dest->rx_packets = stats->rx_packets; 992 dest->rx_packets = atomic_long_read(&stats->rx_packets);
1032 dest->rx_bytes = stats->rx_bytes; 993 dest->rx_bytes = atomic_long_read(&stats->rx_bytes);
1033 dest->rx_seq_discards = stats->rx_seq_discards; 994 dest->rx_seq_discards = atomic_long_read(&stats->rx_seq_discards);
1034 dest->rx_oos_packets = stats->rx_oos_packets; 995 dest->rx_oos_packets = atomic_long_read(&stats->rx_oos_packets);
1035 dest->rx_errors = stats->rx_errors; 996 dest->rx_errors = atomic_long_read(&stats->rx_errors);
1036} 997}
1037 998
1038/* Session ioctl helper. 999/* Session ioctl helper.
@@ -1666,14 +1627,14 @@ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
1666 tunnel->name, 1627 tunnel->name,
1667 (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N', 1628 (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
1668 atomic_read(&tunnel->ref_count) - 1); 1629 atomic_read(&tunnel->ref_count) - 1);
1669 seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n", 1630 seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n",
1670 tunnel->debug, 1631 tunnel->debug,
1671 (unsigned long long)tunnel->stats.tx_packets, 1632 atomic_long_read(&tunnel->stats.tx_packets),
1672 (unsigned long long)tunnel->stats.tx_bytes, 1633 atomic_long_read(&tunnel->stats.tx_bytes),
1673 (unsigned long long)tunnel->stats.tx_errors, 1634 atomic_long_read(&tunnel->stats.tx_errors),
1674 (unsigned long long)tunnel->stats.rx_packets, 1635 atomic_long_read(&tunnel->stats.rx_packets),
1675 (unsigned long long)tunnel->stats.rx_bytes, 1636 atomic_long_read(&tunnel->stats.rx_bytes),
1676 (unsigned long long)tunnel->stats.rx_errors); 1637 atomic_long_read(&tunnel->stats.rx_errors));
1677} 1638}
1678 1639
1679static void pppol2tp_seq_session_show(struct seq_file *m, void *v) 1640static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
@@ -1708,14 +1669,14 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
1708 session->lns_mode ? "LNS" : "LAC", 1669 session->lns_mode ? "LNS" : "LAC",
1709 session->debug, 1670 session->debug,
1710 jiffies_to_msecs(session->reorder_timeout)); 1671 jiffies_to_msecs(session->reorder_timeout));
1711 seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n", 1672 seq_printf(m, " %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n",
1712 session->nr, session->ns, 1673 session->nr, session->ns,
1713 (unsigned long long)session->stats.tx_packets, 1674 atomic_long_read(&session->stats.tx_packets),
1714 (unsigned long long)session->stats.tx_bytes, 1675 atomic_long_read(&session->stats.tx_bytes),
1715 (unsigned long long)session->stats.tx_errors, 1676 atomic_long_read(&session->stats.tx_errors),
1716 (unsigned long long)session->stats.rx_packets, 1677 atomic_long_read(&session->stats.rx_packets),
1717 (unsigned long long)session->stats.rx_bytes, 1678 atomic_long_read(&session->stats.rx_bytes),
1718 (unsigned long long)session->stats.rx_errors); 1679 atomic_long_read(&session->stats.rx_errors));
1719 1680
1720 if (po) 1681 if (po)
1721 seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); 1682 seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
@@ -1839,7 +1800,7 @@ static const struct pppox_proto pppol2tp_proto = {
1839 1800
1840static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = { 1801static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
1841 .session_create = pppol2tp_session_create, 1802 .session_create = pppol2tp_session_create,
1842 .session_delete = pppol2tp_session_delete, 1803 .session_delete = l2tp_session_delete,
1843}; 1804};
1844 1805
1845#endif /* CONFIG_L2TP_V3 */ 1806#endif /* CONFIG_L2TP_V3 */
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 88709882c464..48aaa89253e0 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
720 int target; /* Read at least this many bytes */ 720 int target; /* Read at least this many bytes */
721 long timeo; 721 long timeo;
722 722
723 msg->msg_namelen = 0;
724
723 lock_sock(sk); 725 lock_sock(sk);
724 copied = -ENOTCONN; 726 copied = -ENOTCONN;
725 if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) 727 if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
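
The af_llc hunk initialises msg_namelen before any early return, which appears to be aimed at never reporting an uninitialised address length back to the caller. A tiny userspace sketch of that defensive pattern follows, using a made-up msghdr_lite type rather than the real struct msghdr.

/* Sketch of the recvmsg fix above: set the reported address length to
 * zero before any early-exit path, so a caller never reads a stale or
 * uninitialised value.  The API here is invented for illustration. */
#include <stdio.h>
#include <string.h>

struct msghdr_lite {
    char name[16];
    int  namelen;
};

static int recv_like(struct msghdr_lite *msg, int connected)
{
    msg->namelen = 0;       /* do this first: every return path is covered */

    if (!connected)
        return -1;          /* early error exit, namelen already sane */

    strcpy(msg->name, "peer0");
    msg->namelen = (int)strlen(msg->name);
    return 0;
}

int main(void)
{
    struct msghdr_lite msg;
    memset(&msg, 0x41, sizeof(msg));    /* simulate uninitialised memory */
    recv_like(&msg, 0);
    printf("namelen after failed receive: %d\n", msg.namelen);
    return 0;
}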
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index fb306814576a..a6893602f87a 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2582,7 +2582,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
2582 list_del(&dep->list); 2582 list_del(&dep->list);
2583 mutex_unlock(&local->mtx); 2583 mutex_unlock(&local->mtx);
2584 2584
2585 ieee80211_roc_notify_destroy(dep); 2585 ieee80211_roc_notify_destroy(dep, true);
2586 return 0; 2586 return 0;
2587 } 2587 }
2588 2588
@@ -2622,7 +2622,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
2622 ieee80211_start_next_roc(local); 2622 ieee80211_start_next_roc(local);
2623 mutex_unlock(&local->mtx); 2623 mutex_unlock(&local->mtx);
2624 2624
2625 ieee80211_roc_notify_destroy(found); 2625 ieee80211_roc_notify_destroy(found, true);
2626 } else { 2626 } else {
2627 /* work may be pending so use it all the time */ 2627 /* work may be pending so use it all the time */
2628 found->abort = true; 2628 found->abort = true;
@@ -2632,6 +2632,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
2632 2632
2633 /* work will clean up etc */ 2633 /* work will clean up etc */
2634 flush_delayed_work(&found->work); 2634 flush_delayed_work(&found->work);
2635 WARN_ON(!found->to_be_freed);
2636 kfree(found);
2635 } 2637 }
2636 2638
2637 return 0; 2639 return 0;
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 78c0d90dd641..931be419ab5a 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -63,6 +63,7 @@ ieee80211_new_chanctx(struct ieee80211_local *local,
63 enum ieee80211_chanctx_mode mode) 63 enum ieee80211_chanctx_mode mode)
64{ 64{
65 struct ieee80211_chanctx *ctx; 65 struct ieee80211_chanctx *ctx;
66 u32 changed;
66 int err; 67 int err;
67 68
68 lockdep_assert_held(&local->chanctx_mtx); 69 lockdep_assert_held(&local->chanctx_mtx);
@@ -76,6 +77,13 @@ ieee80211_new_chanctx(struct ieee80211_local *local,
76 ctx->conf.rx_chains_dynamic = 1; 77 ctx->conf.rx_chains_dynamic = 1;
77 ctx->mode = mode; 78 ctx->mode = mode;
78 79
80 /* acquire mutex to prevent idle from changing */
81 mutex_lock(&local->mtx);
82 /* turn idle off *before* setting channel -- some drivers need that */
83 changed = ieee80211_idle_off(local);
84 if (changed)
85 ieee80211_hw_config(local, changed);
86
79 if (!local->use_chanctx) { 87 if (!local->use_chanctx) {
80 local->_oper_channel_type = 88 local->_oper_channel_type =
81 cfg80211_get_chandef_type(chandef); 89 cfg80211_get_chandef_type(chandef);
@@ -85,14 +93,17 @@ ieee80211_new_chanctx(struct ieee80211_local *local,
85 err = drv_add_chanctx(local, ctx); 93 err = drv_add_chanctx(local, ctx);
86 if (err) { 94 if (err) {
87 kfree(ctx); 95 kfree(ctx);
88 return ERR_PTR(err); 96 ctx = ERR_PTR(err);
97
98 ieee80211_recalc_idle(local);
99 goto out;
89 } 100 }
90 } 101 }
91 102
103 /* and keep the mutex held until the new chanctx is on the list */
92 list_add_rcu(&ctx->list, &local->chanctx_list); 104 list_add_rcu(&ctx->list, &local->chanctx_list);
93 105
94 mutex_lock(&local->mtx); 106 out:
95 ieee80211_recalc_idle(local);
96 mutex_unlock(&local->mtx); 107 mutex_unlock(&local->mtx);
97 108
98 return ctx; 109 return ctx;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 388580a1bada..5672533a0832 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -309,6 +309,7 @@ struct ieee80211_roc_work {
309 struct ieee80211_channel *chan; 309 struct ieee80211_channel *chan;
310 310
311 bool started, abort, hw_begun, notified; 311 bool started, abort, hw_begun, notified;
312 bool to_be_freed;
312 313
313 unsigned long hw_start_time; 314 unsigned long hw_start_time;
314 315
@@ -1347,7 +1348,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local);
1347void ieee80211_roc_setup(struct ieee80211_local *local); 1348void ieee80211_roc_setup(struct ieee80211_local *local);
1348void ieee80211_start_next_roc(struct ieee80211_local *local); 1349void ieee80211_start_next_roc(struct ieee80211_local *local);
1349void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); 1350void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
1350void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc); 1351void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free);
1351void ieee80211_sw_roc_work(struct work_struct *work); 1352void ieee80211_sw_roc_work(struct work_struct *work);
1352void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); 1353void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
1353 1354
@@ -1361,6 +1362,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
1361 enum nl80211_iftype type); 1362 enum nl80211_iftype type);
1362void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); 1363void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata);
1363void ieee80211_remove_interfaces(struct ieee80211_local *local); 1364void ieee80211_remove_interfaces(struct ieee80211_local *local);
1365u32 ieee80211_idle_off(struct ieee80211_local *local);
1364void ieee80211_recalc_idle(struct ieee80211_local *local); 1366void ieee80211_recalc_idle(struct ieee80211_local *local);
1365void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, 1367void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
1366 const int offset); 1368 const int offset);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index baaa8608e52d..58150f877ec3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -78,7 +78,7 @@ void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
78 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); 78 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
79} 79}
80 80
81static u32 ieee80211_idle_off(struct ieee80211_local *local) 81u32 ieee80211_idle_off(struct ieee80211_local *local)
82{ 82{
83 if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) 83 if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
84 return 0; 84 return 0;
@@ -349,21 +349,19 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
349static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) 349static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
350{ 350{
351 struct ieee80211_sub_if_data *sdata; 351 struct ieee80211_sub_if_data *sdata;
352 int ret = 0; 352 int ret;
353 353
354 if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) 354 if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
355 return 0; 355 return 0;
356 356
357 mutex_lock(&local->iflist_mtx); 357 ASSERT_RTNL();
358 358
359 if (local->monitor_sdata) 359 if (local->monitor_sdata)
360 goto out_unlock; 360 return 0;
361 361
362 sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); 362 sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
363 if (!sdata) { 363 if (!sdata)
364 ret = -ENOMEM; 364 return -ENOMEM;
365 goto out_unlock;
366 }
367 365
368 /* set up data */ 366 /* set up data */
369 sdata->local = local; 367 sdata->local = local;
@@ -377,13 +375,13 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
377 if (WARN_ON(ret)) { 375 if (WARN_ON(ret)) {
378 /* ok .. stupid driver, it asked for this! */ 376 /* ok .. stupid driver, it asked for this! */
379 kfree(sdata); 377 kfree(sdata);
380 goto out_unlock; 378 return ret;
381 } 379 }
382 380
383 ret = ieee80211_check_queues(sdata); 381 ret = ieee80211_check_queues(sdata);
384 if (ret) { 382 if (ret) {
385 kfree(sdata); 383 kfree(sdata);
386 goto out_unlock; 384 return ret;
387 } 385 }
388 386
389 ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, 387 ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
@@ -391,13 +389,14 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
391 if (ret) { 389 if (ret) {
392 drv_remove_interface(local, sdata); 390 drv_remove_interface(local, sdata);
393 kfree(sdata); 391 kfree(sdata);
394 goto out_unlock; 392 return ret;
395 } 393 }
396 394
395 mutex_lock(&local->iflist_mtx);
397 rcu_assign_pointer(local->monitor_sdata, sdata); 396 rcu_assign_pointer(local->monitor_sdata, sdata);
398 out_unlock:
399 mutex_unlock(&local->iflist_mtx); 397 mutex_unlock(&local->iflist_mtx);
400 return ret; 398
399 return 0;
401} 400}
402 401
403static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) 402static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
@@ -407,14 +406,20 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
407 if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) 406 if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
408 return; 407 return;
409 408
409 ASSERT_RTNL();
410
410 mutex_lock(&local->iflist_mtx); 411 mutex_lock(&local->iflist_mtx);
411 412
412 sdata = rcu_dereference_protected(local->monitor_sdata, 413 sdata = rcu_dereference_protected(local->monitor_sdata,
413 lockdep_is_held(&local->iflist_mtx)); 414 lockdep_is_held(&local->iflist_mtx));
414 if (!sdata) 415 if (!sdata) {
415 goto out_unlock; 416 mutex_unlock(&local->iflist_mtx);
417 return;
418 }
416 419
417 rcu_assign_pointer(local->monitor_sdata, NULL); 420 rcu_assign_pointer(local->monitor_sdata, NULL);
421 mutex_unlock(&local->iflist_mtx);
422
418 synchronize_net(); 423 synchronize_net();
419 424
420 ieee80211_vif_release_channel(sdata); 425 ieee80211_vif_release_channel(sdata);
@@ -422,8 +427,6 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
422 drv_remove_interface(local, sdata); 427 drv_remove_interface(local, sdata);
423 428
424 kfree(sdata); 429 kfree(sdata);
425 out_unlock:
426 mutex_unlock(&local->iflist_mtx);
427} 430}
428 431
429/* 432/*
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 29ce2aa87e7b..4749b3858695 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1060,7 +1060,8 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
1060 1060
1061 rcu_read_lock(); 1061 rcu_read_lock();
1062 list_for_each_entry_rcu(sdata, &local->interfaces, list) 1062 list_for_each_entry_rcu(sdata, &local->interfaces, list)
1063 if (ieee80211_vif_is_mesh(&sdata->vif)) 1063 if (ieee80211_vif_is_mesh(&sdata->vif) &&
1064 ieee80211_sdata_running(sdata))
1064 ieee80211_queue_work(&local->hw, &sdata->work); 1065 ieee80211_queue_work(&local->hw, &sdata->work);
1065 rcu_read_unlock(); 1066 rcu_read_unlock();
1066} 1067}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 141577412d84..82cc30318a86 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3608,8 +3608,10 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
3608 3608
3609 /* Restart STA timers */ 3609 /* Restart STA timers */
3610 rcu_read_lock(); 3610 rcu_read_lock();
3611 list_for_each_entry_rcu(sdata, &local->interfaces, list) 3611 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
3612 ieee80211_restart_sta_timer(sdata); 3612 if (ieee80211_sdata_running(sdata))
3613 ieee80211_restart_sta_timer(sdata);
3614 }
3613 rcu_read_unlock(); 3615 rcu_read_unlock();
3614} 3616}
3615 3617
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index cc79b4a2e821..430bd254e496 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -297,10 +297,13 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
297 } 297 }
298} 298}
299 299
300void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) 300void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free)
301{ 301{
302 struct ieee80211_roc_work *dep, *tmp; 302 struct ieee80211_roc_work *dep, *tmp;
303 303
304 if (WARN_ON(roc->to_be_freed))
305 return;
306
304 /* was never transmitted */ 307 /* was never transmitted */
305 if (roc->frame) { 308 if (roc->frame) {
306 cfg80211_mgmt_tx_status(&roc->sdata->wdev, 309 cfg80211_mgmt_tx_status(&roc->sdata->wdev,
@@ -316,9 +319,12 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
316 GFP_KERNEL); 319 GFP_KERNEL);
317 320
318 list_for_each_entry_safe(dep, tmp, &roc->dependents, list) 321 list_for_each_entry_safe(dep, tmp, &roc->dependents, list)
319 ieee80211_roc_notify_destroy(dep); 322 ieee80211_roc_notify_destroy(dep, true);
320 323
321 kfree(roc); 324 if (free)
325 kfree(roc);
326 else
327 roc->to_be_freed = true;
322} 328}
323 329
324void ieee80211_sw_roc_work(struct work_struct *work) 330void ieee80211_sw_roc_work(struct work_struct *work)
@@ -331,6 +337,9 @@ void ieee80211_sw_roc_work(struct work_struct *work)
331 337
332 mutex_lock(&local->mtx); 338 mutex_lock(&local->mtx);
333 339
340 if (roc->to_be_freed)
341 goto out_unlock;
342
334 if (roc->abort) 343 if (roc->abort)
335 goto finish; 344 goto finish;
336 345
@@ -370,7 +379,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
370 finish: 379 finish:
371 list_del(&roc->list); 380 list_del(&roc->list);
372 started = roc->started; 381 started = roc->started;
373 ieee80211_roc_notify_destroy(roc); 382 ieee80211_roc_notify_destroy(roc, !roc->abort);
374 383
375 if (started) { 384 if (started) {
376 drv_flush(local, false); 385 drv_flush(local, false);
@@ -410,7 +419,7 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
410 419
411 list_del(&roc->list); 420 list_del(&roc->list);
412 421
413 ieee80211_roc_notify_destroy(roc); 422 ieee80211_roc_notify_destroy(roc, true);
414 423
415 /* if there's another roc, start it now */ 424 /* if there's another roc, start it now */
416 ieee80211_start_next_roc(local); 425 ieee80211_start_next_roc(local);
@@ -460,12 +469,14 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)
460 list_for_each_entry_safe(roc, tmp, &tmp_list, list) { 469 list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
461 if (local->ops->remain_on_channel) { 470 if (local->ops->remain_on_channel) {
462 list_del(&roc->list); 471 list_del(&roc->list);
463 ieee80211_roc_notify_destroy(roc); 472 ieee80211_roc_notify_destroy(roc, true);
464 } else { 473 } else {
465 ieee80211_queue_delayed_work(&local->hw, &roc->work, 0); 474 ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
466 475
467 /* work will clean up etc */ 476 /* work will clean up etc */
468 flush_delayed_work(&roc->work); 477 flush_delayed_work(&roc->work);
478 WARN_ON(!roc->to_be_freed);
479 kfree(roc);
469 } 480 }
470 } 481 }
471 482
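
The cfg.c and offchannel.c hunks above add a to_be_freed flag so a remain-on-channel item that still has work pending is only marked for destruction; whoever flushes the work performs the final kfree. The single-threaded sketch below mimics that hand-off with hypothetical names and no real work queue, so it only illustrates the ownership rule, not the locking.

/* Sketch of the deferred-free handshake added above: the cancel path may
 * not free an item while its work callback can still run, so it marks the
 * item and the party that flushes the work frees it. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct roc_work {
    bool started;
    bool to_be_freed;
};

static void roc_notify_destroy(struct roc_work *roc, bool free_now)
{
    /* report completion to whoever requested the off-channel period */
    printf("roc done (started=%d)\n", roc->started);

    if (free_now)
        free(roc);
    else
        roc->to_be_freed = true;    /* the work flusher owns the free */
}

static void roc_work_fn(struct roc_work *roc)
{
    if (roc->to_be_freed)
        return;                     /* already torn down, nothing to do */
    /* ... normal off-channel work would run here ... */
    roc_notify_destroy(roc, true);
}

static void cancel_roc(struct roc_work *roc)
{
    /* cannot free here: the work may still be queued */
    roc_notify_destroy(roc, false);

    /* "flush" the pending work, then reap the item */
    roc_work_fn(roc);
    if (roc->to_be_freed)
        free(roc);
}

int main(void)
{
    struct roc_work *roc = calloc(1, sizeof(*roc));
    if (!roc)
        return 1;
    roc->started = true;
    cancel_roc(roc);
    return 0;
}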
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index bb73ed2d20b9..c6844ad080be 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2675,7 +2675,19 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2675 2675
2676 memset(nskb->cb, 0, sizeof(nskb->cb)); 2676 memset(nskb->cb, 0, sizeof(nskb->cb));
2677 2677
2678 ieee80211_tx_skb(rx->sdata, nskb); 2678 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
2679 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
2680
2681 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
2682 IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
2683 IEEE80211_TX_CTL_NO_CCK_RATE;
2684 if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
2685 info->hw_queue =
2686 local->hw.offchannel_tx_hw_queue;
2687 }
2688
2689 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
2690 status->band);
2679 } 2691 }
2680 dev_kfree_skb(rx->skb); 2692 dev_kfree_skb(rx->skb);
2681 return RX_QUEUED; 2693 return RX_QUEUED;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index a79ce820cb50..238a0cca320e 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -766,6 +766,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
766 struct ieee80211_local *local; 766 struct ieee80211_local *local;
767 struct ieee80211_sub_if_data *sdata; 767 struct ieee80211_sub_if_data *sdata;
768 int ret, i; 768 int ret, i;
769 bool have_key = false;
769 770
770 might_sleep(); 771 might_sleep();
771 772
@@ -793,12 +794,19 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
793 list_del_rcu(&sta->list); 794 list_del_rcu(&sta->list);
794 795
795 mutex_lock(&local->key_mtx); 796 mutex_lock(&local->key_mtx);
796 for (i = 0; i < NUM_DEFAULT_KEYS; i++) 797 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
797 __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); 798 __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]));
798 if (sta->ptk) 799 have_key = true;
800 }
801 if (sta->ptk) {
799 __ieee80211_key_free(key_mtx_dereference(local, sta->ptk)); 802 __ieee80211_key_free(key_mtx_dereference(local, sta->ptk));
803 have_key = true;
804 }
800 mutex_unlock(&local->key_mtx); 805 mutex_unlock(&local->key_mtx);
801 806
807 if (!have_key)
808 synchronize_net();
809
802 sta->dead = true; 810 sta->dead = true;
803 811
804 local->num_sta--; 812 local->num_sta--;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 47edf5a40a59..61f49d241712 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1394,10 +1394,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1394 skb_reset_network_header(skb); 1394 skb_reset_network_header(skb);
1395 IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n", 1395 IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
1396 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu); 1396 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
1397 rcu_read_lock();
1398 ipv4_update_pmtu(skb, dev_net(skb->dev), 1397 ipv4_update_pmtu(skb, dev_net(skb->dev),
1399 mtu, 0, 0, 0, 0); 1398 mtu, 0, 0, 0, 0);
1400 rcu_read_unlock();
1401 /* Client uses PMTUD? */ 1399 /* Client uses PMTUD? */
1402 if (!(cih->frag_off & htons(IP_DF))) 1400 if (!(cih->frag_off & htons(IP_DF)))
1403 goto ignore_ipip; 1401 goto ignore_ipip;
@@ -1577,7 +1575,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1577 } 1575 }
1578 /* ipvs enabled in this netns ? */ 1576 /* ipvs enabled in this netns ? */
1579 net = skb_net(skb); 1577 net = skb_net(skb);
1580 if (!net_ipvs(net)->enable) 1578 ipvs = net_ipvs(net);
1579 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1581 return NF_ACCEPT; 1580 return NF_ACCEPT;
1582 1581
1583 ip_vs_fill_iph_skb(af, skb, &iph); 1582 ip_vs_fill_iph_skb(af, skb, &iph);
@@ -1654,7 +1653,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1654 } 1653 }
1655 1654
1656 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); 1655 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
1657 ipvs = net_ipvs(net);
1658 /* Check the server status */ 1656 /* Check the server status */
1659 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { 1657 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
1660 /* the destination server is not available */ 1658 /* the destination server is not available */
@@ -1815,13 +1813,15 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
1815{ 1813{
1816 int r; 1814 int r;
1817 struct net *net; 1815 struct net *net;
1816 struct netns_ipvs *ipvs;
1818 1817
1819 if (ip_hdr(skb)->protocol != IPPROTO_ICMP) 1818 if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
1820 return NF_ACCEPT; 1819 return NF_ACCEPT;
1821 1820
1822 /* ipvs enabled in this netns ? */ 1821 /* ipvs enabled in this netns ? */
1823 net = skb_net(skb); 1822 net = skb_net(skb);
1824 if (!net_ipvs(net)->enable) 1823 ipvs = net_ipvs(net);
1824 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1825 return NF_ACCEPT; 1825 return NF_ACCEPT;
1826 1826
1827 return ip_vs_in_icmp(skb, &r, hooknum); 1827 return ip_vs_in_icmp(skb, &r, hooknum);
@@ -1835,6 +1835,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
1835{ 1835{
1836 int r; 1836 int r;
1837 struct net *net; 1837 struct net *net;
1838 struct netns_ipvs *ipvs;
1838 struct ip_vs_iphdr iphdr; 1839 struct ip_vs_iphdr iphdr;
1839 1840
1840 ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr); 1841 ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr);
@@ -1843,7 +1844,8 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
1843 1844
1844 /* ipvs enabled in this netns ? */ 1845 /* ipvs enabled in this netns ? */
1845 net = skb_net(skb); 1846 net = skb_net(skb);
1846 if (!net_ipvs(net)->enable) 1847 ipvs = net_ipvs(net);
1848 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1847 return NF_ACCEPT; 1849 return NF_ACCEPT;
1848 1850
1849 return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr); 1851 return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c68198bf9128..9e2d1cccd1eb 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1808,6 +1808,12 @@ static struct ctl_table vs_vars[] = {
1808 .mode = 0644, 1808 .mode = 0644,
1809 .proc_handler = proc_dointvec, 1809 .proc_handler = proc_dointvec,
1810 }, 1810 },
1811 {
1812 .procname = "backup_only",
1813 .maxlen = sizeof(int),
1814 .mode = 0644,
1815 .proc_handler = proc_dointvec,
1816 },
1811#ifdef CONFIG_IP_VS_DEBUG 1817#ifdef CONFIG_IP_VS_DEBUG
1812 { 1818 {
1813 .procname = "debug_level", 1819 .procname = "debug_level",
@@ -3741,6 +3747,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
3741 tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; 3747 tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
3742 ipvs->sysctl_pmtu_disc = 1; 3748 ipvs->sysctl_pmtu_disc = 1;
3743 tbl[idx++].data = &ipvs->sysctl_pmtu_disc; 3749 tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
3750 tbl[idx++].data = &ipvs->sysctl_backup_only;
3744 3751
3745 3752
3746 ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); 3753 ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index ae8ec6f27688..cd1d7298f7ba 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -906,7 +906,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
906 sctp_chunkhdr_t _sctpch, *sch; 906 sctp_chunkhdr_t _sctpch, *sch;
907 unsigned char chunk_type; 907 unsigned char chunk_type;
908 int event, next_state; 908 int event, next_state;
909 int ihl; 909 int ihl, cofs;
910 910
911#ifdef CONFIG_IP_VS_IPV6 911#ifdef CONFIG_IP_VS_IPV6
912 ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); 912 ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
@@ -914,8 +914,8 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
914 ihl = ip_hdrlen(skb); 914 ihl = ip_hdrlen(skb);
915#endif 915#endif
916 916
917 sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t), 917 cofs = ihl + sizeof(sctp_sctphdr_t);
918 sizeof(_sctpch), &_sctpch); 918 sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch);
919 if (sch == NULL) 919 if (sch == NULL)
920 return; 920 return;
921 921
@@ -933,10 +933,12 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
933 */ 933 */
934 if ((sch->type == SCTP_CID_COOKIE_ECHO) || 934 if ((sch->type == SCTP_CID_COOKIE_ECHO) ||
935 (sch->type == SCTP_CID_COOKIE_ACK)) { 935 (sch->type == SCTP_CID_COOKIE_ACK)) {
936 sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) + 936 int clen = ntohs(sch->length);
937 sch->length), sizeof(_sctpch), &_sctpch); 937
938 if (sch) { 938 if (clen >= sizeof(sctp_chunkhdr_t)) {
939 if (sch->type == SCTP_CID_ABORT) 939 sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4),
940 sizeof(_sctpch), &_sctpch);
941 if (sch && sch->type == SCTP_CID_ABORT)
940 chunk_type = sch->type; 942 chunk_type = sch->type;
941 } 943 }
942 } 944 }
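
The SCTP fix above converts the on-wire chunk length with ntohs(), rejects lengths shorter than a chunk header, and steps to the next chunk at a 4-byte-aligned offset instead of adding the raw length field. Below is a standalone C sketch of that bounds-checked walk, using a simplified chunk header of its own rather than sctp_chunkhdr_t.

/* Sketch of a bounds-checked SCTP-style chunk walk: convert the on-wire
 * length, reject runts, and advance by the length rounded up to 4 bytes. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)

struct chunkhdr {
    uint8_t  type;
    uint8_t  flags;
    uint16_t length;    /* network byte order, includes this header */
};

static void walk_chunks(const uint8_t *buf, size_t len)
{
    size_t off = 0;

    while (off + sizeof(struct chunkhdr) <= len) {
        struct chunkhdr ch;
        uint16_t clen;

        memcpy(&ch, buf + off, sizeof(ch));     /* avoid unaligned access */
        clen = ntohs(ch.length);
        if (clen < sizeof(struct chunkhdr))
            break;                              /* malformed: would loop forever */
        printf("chunk type %u, length %u\n", (unsigned)ch.type, (unsigned)clen);
        off += ALIGN4(clen);                    /* chunks are 4-byte aligned */
    }
}

int main(void)
{
    /* two chunks: COOKIE_ECHO (type 10, length 6, padded to 8) then ABORT (type 6) */
    uint8_t pkt[] = { 10, 0, 0, 6,  1, 2, 0, 0,
                       6, 0, 0, 4 };
    walk_chunks(pkt, sizeof(pkt));
    return 0;
}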
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 432f95780003..ba65b2041eb4 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -969,6 +969,10 @@ static int __init nf_conntrack_proto_dccp_init(void)
969{ 969{
970 int ret; 970 int ret;
971 971
972 ret = register_pernet_subsys(&dccp_net_ops);
973 if (ret < 0)
974 goto out_pernet;
975
972 ret = nf_ct_l4proto_register(&dccp_proto4); 976 ret = nf_ct_l4proto_register(&dccp_proto4);
973 if (ret < 0) 977 if (ret < 0)
974 goto out_dccp4; 978 goto out_dccp4;
@@ -977,16 +981,12 @@ static int __init nf_conntrack_proto_dccp_init(void)
977 if (ret < 0) 981 if (ret < 0)
978 goto out_dccp6; 982 goto out_dccp6;
979 983
980 ret = register_pernet_subsys(&dccp_net_ops);
981 if (ret < 0)
982 goto out_pernet;
983
984 return 0; 984 return 0;
985out_pernet:
986 nf_ct_l4proto_unregister(&dccp_proto6);
987out_dccp6: 985out_dccp6:
988 nf_ct_l4proto_unregister(&dccp_proto4); 986 nf_ct_l4proto_unregister(&dccp_proto4);
989out_dccp4: 987out_dccp4:
988 unregister_pernet_subsys(&dccp_net_ops);
989out_pernet:
990 return ret; 990 return ret;
991} 991}
992 992
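
This hunk, and the matching ones for GRE, SCTP and UDP-Lite below, register the per-net state before the l4proto handlers that rely on it and unwind in reverse order on failure. A compact userspace sketch of that register-in-order, unwind-in-reverse idiom follows; register_state() and register_handler() are invented stand-ins.

/* Sketch of the init ordering fixed above: set up the state a handler
 * depends on first, register the handler second, and undo in reverse
 * order on failure. */
#include <stdio.h>

static int state_ready;

static int register_state(void)    { state_ready = 1; return 0; }
static void unregister_state(void) { state_ready = 0; }

static int register_handler(void)
{
    if (!state_ready)   /* would be a use of uninitialised state */
        return -1;
    return 0;
}

static int proto_init(void)
{
    int ret;

    ret = register_state();     /* per-net data first */
    if (ret < 0)
        goto out_state;

    ret = register_handler();   /* may fire as soon as this returns */
    if (ret < 0)
        goto out_handler;

    return 0;

out_handler:
    unregister_state();         /* undo in reverse order */
out_state:
    return ret;
}

int main(void)
{
    printf("proto_init: %d\n", proto_init());
    return 0;
}

The same reshuffle is applied to the GRE, SCTP and UDP-Lite trackers in the hunks that follow.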
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index bd7d01d9c7e7..155ce9f8a0db 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -420,18 +420,18 @@ static int __init nf_ct_proto_gre_init(void)
420{ 420{
421 int ret; 421 int ret;
422 422
423 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4);
424 if (ret < 0)
425 goto out_gre4;
426
427 ret = register_pernet_subsys(&proto_gre_net_ops); 423 ret = register_pernet_subsys(&proto_gre_net_ops);
428 if (ret < 0) 424 if (ret < 0)
429 goto out_pernet; 425 goto out_pernet;
430 426
427 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4);
428 if (ret < 0)
429 goto out_gre4;
430
431 return 0; 431 return 0;
432out_pernet:
433 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4);
434out_gre4: 432out_gre4:
433 unregister_pernet_subsys(&proto_gre_net_ops);
434out_pernet:
435 return ret; 435 return ret;
436} 436}
437 437
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 480f616d5936..ec83536def9a 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -888,6 +888,10 @@ static int __init nf_conntrack_proto_sctp_init(void)
888{ 888{
889 int ret; 889 int ret;
890 890
891 ret = register_pernet_subsys(&sctp_net_ops);
892 if (ret < 0)
893 goto out_pernet;
894
891 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4); 895 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4);
892 if (ret < 0) 896 if (ret < 0)
893 goto out_sctp4; 897 goto out_sctp4;
@@ -896,16 +900,12 @@ static int __init nf_conntrack_proto_sctp_init(void)
896 if (ret < 0) 900 if (ret < 0)
897 goto out_sctp6; 901 goto out_sctp6;
898 902
899 ret = register_pernet_subsys(&sctp_net_ops);
900 if (ret < 0)
901 goto out_pernet;
902
903 return 0; 903 return 0;
904out_pernet:
905 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp6);
906out_sctp6: 904out_sctp6:
907 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4); 905 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
908out_sctp4: 906out_sctp4:
907 unregister_pernet_subsys(&sctp_net_ops);
908out_pernet:
909 return ret; 909 return ret;
910} 910}
911 911
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 157489581c31..ca969f6273f7 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -371,6 +371,10 @@ static int __init nf_conntrack_proto_udplite_init(void)
371{ 371{
372 int ret; 372 int ret;
373 373
374 ret = register_pernet_subsys(&udplite_net_ops);
375 if (ret < 0)
376 goto out_pernet;
377
374 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4); 378 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4);
375 if (ret < 0) 379 if (ret < 0)
376 goto out_udplite4; 380 goto out_udplite4;
@@ -379,16 +383,12 @@ static int __init nf_conntrack_proto_udplite_init(void)
379 if (ret < 0) 383 if (ret < 0)
380 goto out_udplite6; 384 goto out_udplite6;
381 385
382 ret = register_pernet_subsys(&udplite_net_ops);
383 if (ret < 0)
384 goto out_pernet;
385
386 return 0; 386 return 0;
387out_pernet:
388 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6);
389out_udplite6: 387out_udplite6:
390 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4); 388 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
391out_udplite4: 389out_udplite4:
390 unregister_pernet_subsys(&udplite_net_ops);
391out_pernet:
392 return ret; 392 return ret;
393} 393}
394 394
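
The four conntrack protocol hunks above (dccp, gre, sctp, udplite) apply the same fix: the per-net state is registered before the l4proto trackers that depend on it, and the error path unwinds in the reverse order of registration. A minimal sketch of the resulting shape, with example_* as placeholder names modelled on the dccp case:

    static int __init example_proto_init(void)
    {
            int ret;

            /* per-net state first: the trackers may use it as soon as
             * they are registered */
            ret = register_pernet_subsys(&example_net_ops);
            if (ret < 0)
                    goto out_pernet;

            ret = nf_ct_l4proto_register(&example_proto4);
            if (ret < 0)
                    goto out_proto4;

            ret = nf_ct_l4proto_register(&example_proto6);
            if (ret < 0)
                    goto out_proto6;

            return 0;

    out_proto6:
            nf_ct_l4proto_unregister(&example_proto4);
    out_proto4:
            unregister_pernet_subsys(&example_net_ops);
    out_pernet:
            return ret;
    }

Each label undoes exactly the steps that succeeded before the failing one, so nothing is left half-registered.
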
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 6bcce401fd1c..fedee3943661 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -568,6 +568,7 @@ static int __init nf_conntrack_standalone_init(void)
568 register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); 568 register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);
569 if (!nf_ct_netfilter_header) { 569 if (!nf_ct_netfilter_header) {
570 pr_err("nf_conntrack: can't register to sysctl.\n"); 570 pr_err("nf_conntrack: can't register to sysctl.\n");
571 ret = -ENOMEM;
571 goto out_sysctl; 572 goto out_sysctl;
572 } 573 }
573#endif 574#endif
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 589d686f0b4c..dc3fd5d44464 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -49,6 +49,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
49 return -EINVAL; 49 return -EINVAL;
50 50
51 acct_name = nla_data(tb[NFACCT_NAME]); 51 acct_name = nla_data(tb[NFACCT_NAME]);
52 if (strlen(acct_name) == 0)
53 return -EINVAL;
52 54
53 list_for_each_entry(nfacct, &nfnl_acct_list, head) { 55 list_for_each_entry(nfacct, &nfnl_acct_list, head) {
54 if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0) 56 if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 858fd52c1040..42680b2baa11 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -112,7 +112,7 @@ instance_create(u_int16_t queue_num, int portid)
112 inst->queue_num = queue_num; 112 inst->queue_num = queue_num;
113 inst->peer_portid = portid; 113 inst->peer_portid = portid;
114 inst->queue_maxlen = NFQNL_QMAX_DEFAULT; 114 inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
115 inst->copy_range = 0xfffff; 115 inst->copy_range = 0xffff;
116 inst->copy_mode = NFQNL_COPY_NONE; 116 inst->copy_mode = NFQNL_COPY_NONE;
117 spin_lock_init(&inst->lock); 117 spin_lock_init(&inst->lock);
118 INIT_LIST_HEAD(&inst->queue_list); 118 INIT_LIST_HEAD(&inst->queue_list);
@@ -1062,8 +1062,10 @@ static int __init nfnetlink_queue_init(void)
1062 1062
1063#ifdef CONFIG_PROC_FS 1063#ifdef CONFIG_PROC_FS
1064 if (!proc_create("nfnetlink_queue", 0440, 1064 if (!proc_create("nfnetlink_queue", 0440,
1065 proc_net_netfilter, &nfqnl_file_ops)) 1065 proc_net_netfilter, &nfqnl_file_ops)) {
1066 status = -ENOMEM;
1066 goto cleanup_subsys; 1067 goto cleanup_subsys;
1068 }
1067#endif 1069#endif
1068 1070
1069 register_netdevice_notifier(&nfqnl_dev_notifier); 1071 register_netdevice_notifier(&nfqnl_dev_notifier);
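
This hunk and the nf_conntrack_standalone one above fix the same omission: the failure branch jumped to the cleanup label without setting an error code, so the function could return whatever value the variable happened to hold (often 0, i.e. spurious success). A sketch of the corrected shape, with foo_* as placeholder names:

    static int __init foo_init(void)
    {
            int status = 0;

            /* ... earlier registration steps ... */

            if (!proc_create("foo", 0440, proc_net_netfilter, &foo_file_ops)) {
                    status = -ENOMEM;       /* report the failure explicitly */
                    goto cleanup;
            }

            return 0;

    cleanup:
            /* undo the earlier registration steps here */
            return status;
    }
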
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index f2aabb6f4105..5a55be3f17a5 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -142,6 +142,7 @@ int genl_register_mc_group(struct genl_family *family,
142 int err = 0; 142 int err = 0;
143 143
144 BUG_ON(grp->name[0] == '\0'); 144 BUG_ON(grp->name[0] == '\0');
145 BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL);
145 146
146 genl_lock(); 147 genl_lock();
147 148
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index d1fa1d9ffd2e..103bd704b5fc 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1173,6 +1173,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
1173 } 1173 }
1174 1174
1175 if (sax != NULL) { 1175 if (sax != NULL) {
1176 memset(sax, 0, sizeof(*sax));
1176 sax->sax25_family = AF_NETROM; 1177 sax->sax25_family = AF_NETROM;
1177 skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, 1178 skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
1178 AX25_ADDR_LEN); 1179 AX25_ADDR_LEN);
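
The memset() added here closes a kernel-to-user information leak: the sockaddr handed back from recvmsg() lives on the stack, and any padding or fields the protocol does not fill would otherwise carry stale kernel bytes to userspace. The same pattern recurs in the rose, nfc and tipc hunks below. A sketch of the idiom, with sockaddr_example/AF_EXAMPLE and peer_port as stand-in names:

    if (msg->msg_name) {
            struct sockaddr_example *addr = msg->msg_name;

            /* clear everything, including padding, before filling in
             * the fields we actually know */
            memset(addr, 0, sizeof(*addr));
            addr->family = AF_EXAMPLE;
            addr->port = peer_port;
            msg->msg_namelen = sizeof(*addr);
    }
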
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index b530afadd76c..ee25f25f0cd6 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -107,8 +107,6 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen,
107 accept_sk->sk_state_change(sk); 107 accept_sk->sk_state_change(sk);
108 108
109 bh_unlock_sock(accept_sk); 109 bh_unlock_sock(accept_sk);
110
111 sock_orphan(accept_sk);
112 } 110 }
113 111
114 if (listen == true) { 112 if (listen == true) {
@@ -134,8 +132,6 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen,
134 132
135 bh_unlock_sock(sk); 133 bh_unlock_sock(sk);
136 134
137 sock_orphan(sk);
138
139 sk_del_node_init(sk); 135 sk_del_node_init(sk);
140 } 136 }
141 137
@@ -164,8 +160,6 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen,
164 160
165 bh_unlock_sock(sk); 161 bh_unlock_sock(sk);
166 162
167 sock_orphan(sk);
168
169 sk_del_node_init(sk); 163 sk_del_node_init(sk);
170 } 164 }
171 165
@@ -827,7 +821,6 @@ static void nfc_llcp_recv_ui(struct nfc_llcp_local *local,
827 skb_get(skb); 821 skb_get(skb);
828 } else { 822 } else {
829 pr_err("Receive queue is full\n"); 823 pr_err("Receive queue is full\n");
830 kfree_skb(skb);
831 } 824 }
832 825
833 nfc_llcp_sock_put(llcp_sock); 826 nfc_llcp_sock_put(llcp_sock);
@@ -1028,7 +1021,6 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
1028 skb_get(skb); 1021 skb_get(skb);
1029 } else { 1022 } else {
1030 pr_err("Receive queue is full\n"); 1023 pr_err("Receive queue is full\n");
1031 kfree_skb(skb);
1032 } 1024 }
1033 } 1025 }
1034 1026
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 5c7cdf3f2a83..6c94447ec414 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -270,7 +270,9 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
270 } 270 }
271 271
272 if (sk->sk_state == LLCP_CONNECTED || !newsock) { 272 if (sk->sk_state == LLCP_CONNECTED || !newsock) {
273 nfc_llcp_accept_unlink(sk); 273 list_del_init(&lsk->accept_queue);
274 sock_put(sk);
275
274 if (newsock) 276 if (newsock)
275 sock_graft(sk, newsock); 277 sock_graft(sk, newsock);
276 278
@@ -464,8 +466,6 @@ static int llcp_sock_release(struct socket *sock)
464 nfc_llcp_accept_unlink(accept_sk); 466 nfc_llcp_accept_unlink(accept_sk);
465 467
466 release_sock(accept_sk); 468 release_sock(accept_sk);
467
468 sock_orphan(accept_sk);
469 } 469 }
470 } 470 }
471 471
@@ -646,6 +646,8 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
646 646
647 pr_debug("%p %zu\n", sk, len); 647 pr_debug("%p %zu\n", sk, len);
648 648
649 msg->msg_namelen = 0;
650
649 lock_sock(sk); 651 lock_sock(sk);
650 652
651 if (sk->sk_state == LLCP_CLOSED && 653 if (sk->sk_state == LLCP_CLOSED &&
@@ -691,6 +693,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
691 693
692 pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); 694 pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap);
693 695
696 memset(sockaddr, 0, sizeof(*sockaddr));
694 sockaddr->sa_family = AF_NFC; 697 sockaddr->sa_family = AF_NFC;
695 sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP; 698 sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP;
696 sockaddr->dsap = ui_cb->dsap; 699 sockaddr->dsap = ui_cb->dsap;
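
Setting msg->msg_namelen = 0 at the top of recvmsg() is the companion fix: on paths that return without filling in a source address, the caller would otherwise copy back an uninitialized length and, with it, uninitialized sockaddr bytes. A minimal sketch of the intended structure (names and the do_receive() helper are illustrative only):

    static int example_recvmsg(struct kiocb *iocb, struct socket *sock,
                               struct msghdr *msg, size_t len, int flags)
    {
            struct sock *sk = sock->sk;
            int err;

            /* default: no address returned unless a later branch fills one in */
            msg->msg_namelen = 0;

            lock_sock(sk);
            err = do_receive(sk, msg, len, flags);  /* placeholder helper */
            release_sock(sk);

            return err;
    }
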
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index cf68e6e4054a..9c8347451597 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1253 skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 1253 skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1254 1254
1255 if (srose != NULL) { 1255 if (srose != NULL) {
1256 memset(srose, 0, msg->msg_namelen);
1256 srose->srose_family = AF_ROSE; 1257 srose->srose_family = AF_ROSE;
1257 srose->srose_addr = rose->dest_addr; 1258 srose->srose_addr = rose->dest_addr;
1258 srose->srose_call = rose->dest_call; 1259 srose->srose_call = rose->dest_call;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 13aa47aa2ffb..1bc210ffcba2 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -962,8 +962,11 @@ cbq_dequeue(struct Qdisc *sch)
962 cbq_update(q); 962 cbq_update(q);
963 if ((incr -= incr2) < 0) 963 if ((incr -= incr2) < 0)
964 incr = 0; 964 incr = 0;
965 q->now += incr;
966 } else {
967 if (now > q->now)
968 q->now = now;
965 } 969 }
966 q->now += incr;
967 q->now_rt = now; 970 q->now_rt = now;
968 971
969 for (;;) { 972 for (;;) {
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 4e606fcb2534..55786283a3df 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -195,7 +195,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
195 flow->deficit = q->quantum; 195 flow->deficit = q->quantum;
196 flow->dropped = 0; 196 flow->dropped = 0;
197 } 197 }
198 if (++sch->q.qlen < sch->limit) 198 if (++sch->q.qlen <= sch->limit)
199 return NET_XMIT_SUCCESS; 199 return NET_XMIT_SUCCESS;
200 200
201 q->drop_overlimit++; 201 q->drop_overlimit++;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ffad48109a22..eac7e0ee23c1 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -904,7 +904,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
904 u64 mult; 904 u64 mult;
905 int shift; 905 int shift;
906 906
907 r->rate_bps = rate << 3; 907 r->rate_bps = (u64)rate << 3;
908 r->shift = 0; 908 r->shift = 0;
909 r->mult = 1; 909 r->mult = 1;
910 /* 910 /*
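
The cast in psched_ratecfg_precompute() matters because rate is a u32: `rate << 3` is evaluated in 32 bits and overflows for byte rates above 512 MiB/s, whereas casting first makes the shift happen in 64 bits. A small illustration:

    u32 rate  = 600000000;              /* ~600 Mbyte/s                          */
    u64 wrong = rate << 3;              /* shift done in 32 bits, then truncated */
    u64 right = (u64)rate << 3;         /* 4800000000 bits/s, as intended        */
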
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index dcc446e7fbf6..d5f35f15af98 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -304,10 +304,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
304 err = rpciod_up(); 304 err = rpciod_up();
305 if (err) 305 if (err)
306 goto out_no_rpciod; 306 goto out_no_rpciod;
307 err = -EINVAL;
308 if (!xprt)
309 goto out_no_xprt;
310 307
308 err = -EINVAL;
311 if (args->version >= program->nrvers) 309 if (args->version >= program->nrvers)
312 goto out_err; 310 goto out_err;
313 version = program->version[args->version]; 311 version = program->version[args->version];
@@ -382,10 +380,9 @@ out_no_principal:
382out_no_stats: 380out_no_stats:
383 kfree(clnt); 381 kfree(clnt);
384out_err: 382out_err:
385 xprt_put(xprt);
386out_no_xprt:
387 rpciod_down(); 383 rpciod_down();
388out_no_rpciod: 384out_no_rpciod:
385 xprt_put(xprt);
389 return ERR_PTR(err); 386 return ERR_PTR(err);
390} 387}
391 388
@@ -512,7 +509,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
512 new = rpc_new_client(args, xprt); 509 new = rpc_new_client(args, xprt);
513 if (IS_ERR(new)) { 510 if (IS_ERR(new)) {
514 err = PTR_ERR(new); 511 err = PTR_ERR(new);
515 goto out_put; 512 goto out_err;
516 } 513 }
517 514
518 atomic_inc(&clnt->cl_count); 515 atomic_inc(&clnt->cl_count);
@@ -525,8 +522,6 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
525 new->cl_chatty = clnt->cl_chatty; 522 new->cl_chatty = clnt->cl_chatty;
526 return new; 523 return new;
527 524
528out_put:
529 xprt_put(xprt);
530out_err: 525out_err:
531 dprintk("RPC: %s: returned error %d\n", __func__, err); 526 dprintk("RPC: %s: returned error %d\n", __func__, err);
532 return ERR_PTR(err); 527 return ERR_PTR(err);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index fb20f25ddec9..f8529fc8e542 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -180,6 +180,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
180 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); 180 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
181 task->tk_waitqueue = queue; 181 task->tk_waitqueue = queue;
182 queue->qlen++; 182 queue->qlen++;
183 /* barrier matches the read in rpc_wake_up_task_queue_locked() */
184 smp_wmb();
183 rpc_set_queued(task); 185 rpc_set_queued(task);
184 186
185 dprintk("RPC: %5u added to queue %p \"%s\"\n", 187 dprintk("RPC: %5u added to queue %p \"%s\"\n",
@@ -430,8 +432,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
430 */ 432 */
431static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task) 433static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
432{ 434{
433 if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue) 435 if (RPC_IS_QUEUED(task)) {
434 __rpc_do_wake_up_task(queue, task); 436 smp_rmb();
437 if (task->tk_waitqueue == queue)
438 __rpc_do_wake_up_task(queue, task);
439 }
435} 440}
436 441
437/* 442/*
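
The barriers added to sunrpc/sched.c form a classic publish/observe pair: the writer must make the wait-queue fields visible before the QUEUED flag is set, and the reader, having seen the flag, must not use a tk_waitqueue value read from before it. Restated in sketch form from the hunks above:

    /* writer */
    task->tk_waitqueue = queue;     /* publish the data ...              */
    queue->qlen++;
    smp_wmb();                      /* ... before the flag               */
    rpc_set_queued(task);

    /* reader */
    if (RPC_IS_QUEUED(task)) {
            smp_rmb();              /* pairs with the writer's smp_wmb() */
            if (task->tk_waitqueue == queue)
                    __rpc_do_wake_up_task(queue, task);
    }
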
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index a9622b6cd916..515ce38e4f4c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -790,6 +790,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
790 if (addr) { 790 if (addr) {
791 addr->family = AF_TIPC; 791 addr->family = AF_TIPC;
792 addr->addrtype = TIPC_ADDR_ID; 792 addr->addrtype = TIPC_ADDR_ID;
793 memset(&addr->addr, 0, sizeof(addr->addr));
793 addr->addr.id.ref = msg_origport(msg); 794 addr->addr.id.ref = msg_origport(msg);
794 addr->addr.id.node = msg_orignode(msg); 795 addr->addr.id.node = msg_orignode(msg);
795 addr->addr.name.domain = 0; /* could leave uninitialized */ 796 addr->addr.name.domain = 0; /* could leave uninitialized */
@@ -904,6 +905,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
904 goto exit; 905 goto exit;
905 } 906 }
906 907
908 /* will be updated in set_orig_addr() if needed */
909 m->msg_namelen = 0;
910
907 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 911 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
908restart: 912restart:
909 913
@@ -1013,6 +1017,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
1013 goto exit; 1017 goto exit;
1014 } 1018 }
1015 1019
1020 /* will be updated in set_orig_addr() if needed */
1021 m->msg_namelen = 0;
1022
1016 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); 1023 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
1017 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1024 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1018 1025
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 51be64f163ec..2db702d82e7d 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -382,7 +382,7 @@ static void unix_sock_destructor(struct sock *sk)
382#endif 382#endif
383} 383}
384 384
385static int unix_release_sock(struct sock *sk, int embrion) 385static void unix_release_sock(struct sock *sk, int embrion)
386{ 386{
387 struct unix_sock *u = unix_sk(sk); 387 struct unix_sock *u = unix_sk(sk);
388 struct path path; 388 struct path path;
@@ -451,8 +451,6 @@ static int unix_release_sock(struct sock *sk, int embrion)
451 451
452 if (unix_tot_inflight) 452 if (unix_tot_inflight)
453 unix_gc(); /* Garbage collect fds */ 453 unix_gc(); /* Garbage collect fds */
454
455 return 0;
456} 454}
457 455
458static void init_peercred(struct sock *sk) 456static void init_peercred(struct sock *sk)
@@ -699,9 +697,10 @@ static int unix_release(struct socket *sock)
699 if (!sk) 697 if (!sk)
700 return 0; 698 return 0;
701 699
700 unix_release_sock(sk, 0);
702 sock->sk = NULL; 701 sock->sk = NULL;
703 702
704 return unix_release_sock(sk, 0); 703 return 0;
705} 704}
706 705
707static int unix_autobind(struct socket *sock) 706static int unix_autobind(struct socket *sock)
@@ -1994,7 +1993,7 @@ again:
1994 if ((UNIXCB(skb).pid != siocb->scm->pid) || 1993 if ((UNIXCB(skb).pid != siocb->scm->pid) ||
1995 (UNIXCB(skb).cred != siocb->scm->cred)) 1994 (UNIXCB(skb).cred != siocb->scm->cred))
1996 break; 1995 break;
1997 } else { 1996 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
1998 /* Copy credentials */ 1997 /* Copy credentials */
1999 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); 1998 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
2000 check_creds = 1; 1999 check_creds = 1;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index ca511c4f388a..7f93e2a42d7a 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -207,7 +207,7 @@ static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
207 struct vsock_sock *vsk; 207 struct vsock_sock *vsk;
208 208
209 list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) 209 list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
210 if (vsock_addr_equals_addr_any(addr, &vsk->local_addr)) 210 if (addr->svm_port == vsk->local_addr.svm_port)
211 return sk_vsock(vsk); 211 return sk_vsock(vsk);
212 212
213 return NULL; 213 return NULL;
@@ -220,8 +220,8 @@ static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
220 220
221 list_for_each_entry(vsk, vsock_connected_sockets(src, dst), 221 list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
222 connected_table) { 222 connected_table) {
223 if (vsock_addr_equals_addr(src, &vsk->remote_addr) 223 if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
224 && vsock_addr_equals_addr(dst, &vsk->local_addr)) { 224 dst->svm_port == vsk->local_addr.svm_port) {
225 return sk_vsock(vsk); 225 return sk_vsock(vsk);
226 } 226 }
227 } 227 }
@@ -1670,6 +1670,8 @@ vsock_stream_recvmsg(struct kiocb *kiocb,
1670 vsk = vsock_sk(sk); 1670 vsk = vsock_sk(sk);
1671 err = 0; 1671 err = 0;
1672 1672
1673 msg->msg_namelen = 0;
1674
1673 lock_sock(sk); 1675 lock_sock(sk);
1674 1676
1675 if (sk->sk_state != SS_CONNECTED) { 1677 if (sk->sk_state != SS_CONNECTED) {
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index a70ace83a153..5e04d3d96285 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -464,19 +464,16 @@ static struct sock *vmci_transport_get_pending(
464 struct vsock_sock *vlistener; 464 struct vsock_sock *vlistener;
465 struct vsock_sock *vpending; 465 struct vsock_sock *vpending;
466 struct sock *pending; 466 struct sock *pending;
467 struct sockaddr_vm src;
468
469 vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
467 470
468 vlistener = vsock_sk(listener); 471 vlistener = vsock_sk(listener);
469 472
470 list_for_each_entry(vpending, &vlistener->pending_links, 473 list_for_each_entry(vpending, &vlistener->pending_links,
471 pending_links) { 474 pending_links) {
472 struct sockaddr_vm src;
473 struct sockaddr_vm dst;
474
475 vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
476 vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);
477
478 if (vsock_addr_equals_addr(&src, &vpending->remote_addr) && 475 if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
479 vsock_addr_equals_addr(&dst, &vpending->local_addr)) { 476 pkt->dst_port == vpending->local_addr.svm_port) {
480 pending = sk_vsock(vpending); 477 pending = sk_vsock(vpending);
481 sock_hold(pending); 478 sock_hold(pending);
482 goto found; 479 goto found;
@@ -739,10 +736,15 @@ static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
739 */ 736 */
740 bh_lock_sock(sk); 737 bh_lock_sock(sk);
741 738
742 if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED) 739 if (!sock_owned_by_user(sk)) {
743 vmci_trans(vsk)->notify_ops->handle_notify_pkt( 740 /* The local context ID may be out of date, update it. */
744 sk, pkt, true, &dst, &src, 741 vsk->local_addr.svm_cid = dst.svm_cid;
745 &bh_process_pkt); 742
743 if (sk->sk_state == SS_CONNECTED)
744 vmci_trans(vsk)->notify_ops->handle_notify_pkt(
745 sk, pkt, true, &dst, &src,
746 &bh_process_pkt);
747 }
746 748
747 bh_unlock_sock(sk); 749 bh_unlock_sock(sk);
748 750
@@ -902,6 +904,9 @@ static void vmci_transport_recv_pkt_work(struct work_struct *work)
902 904
903 lock_sock(sk); 905 lock_sock(sk);
904 906
907 /* The local context ID may be out of date. */
908 vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;
909
905 switch (sk->sk_state) { 910 switch (sk->sk_state) {
906 case SS_LISTEN: 911 case SS_LISTEN:
907 vmci_transport_recv_listen(sk, pkt); 912 vmci_transport_recv_listen(sk, pkt);
@@ -958,6 +963,10 @@ static int vmci_transport_recv_listen(struct sock *sk,
958 pending = vmci_transport_get_pending(sk, pkt); 963 pending = vmci_transport_get_pending(sk, pkt);
959 if (pending) { 964 if (pending) {
960 lock_sock(pending); 965 lock_sock(pending);
966
967 /* The local context ID may be out of date. */
968 vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context;
969
961 switch (pending->sk_state) { 970 switch (pending->sk_state) {
962 case SS_CONNECTING: 971 case SS_CONNECTING:
963 err = vmci_transport_recv_connecting_server(sk, 972 err = vmci_transport_recv_connecting_server(sk,
@@ -1727,6 +1736,8 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
1727 if (flags & MSG_OOB || flags & MSG_ERRQUEUE) 1736 if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
1728 return -EOPNOTSUPP; 1737 return -EOPNOTSUPP;
1729 1738
1739 msg->msg_namelen = 0;
1740
1730 /* Retrieve the head sk_buff from the socket's receive queue. */ 1741 /* Retrieve the head sk_buff from the socket's receive queue. */
1731 err = 0; 1742 err = 0;
1732 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); 1743 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
@@ -1759,7 +1770,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
1759 if (err) 1770 if (err)
1760 goto out; 1771 goto out;
1761 1772
1762 msg->msg_namelen = 0;
1763 if (msg->msg_name) { 1773 if (msg->msg_name) {
1764 struct sockaddr_vm *vm_addr; 1774 struct sockaddr_vm *vm_addr;
1765 1775
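
Two things change in vmci_transport_get_pending(): the source address is built once before the loop instead of on every iteration (it depends only on the packet, not on the list entry), and the destination check compares ports directly because the local CID may be stale. Sketch of the hoisted form, following the hunk above:

    struct sockaddr_vm src;

    /* invariant across the whole walk, so compute it once */
    vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);

    list_for_each_entry(vpending, &vlistener->pending_links, pending_links) {
            if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
                pkt->dst_port == vpending->local_addr.svm_port) {
                    /* found the matching pending socket */
            }
    }
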
diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c
index b7df1aea7c59..ec2611b4ea0e 100644
--- a/net/vmw_vsock/vsock_addr.c
+++ b/net/vmw_vsock/vsock_addr.c
@@ -64,16 +64,6 @@ bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
64} 64}
65EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); 65EXPORT_SYMBOL_GPL(vsock_addr_equals_addr);
66 66
67bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr,
68 const struct sockaddr_vm *other)
69{
70 return (addr->svm_cid == VMADDR_CID_ANY ||
71 other->svm_cid == VMADDR_CID_ANY ||
72 addr->svm_cid == other->svm_cid) &&
73 addr->svm_port == other->svm_port;
74}
75EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any);
76
77int vsock_addr_cast(const struct sockaddr *addr, 67int vsock_addr_cast(const struct sockaddr *addr,
78 size_t len, struct sockaddr_vm **out_addr) 68 size_t len, struct sockaddr_vm **out_addr)
79{ 69{
diff --git a/net/vmw_vsock/vsock_addr.h b/net/vmw_vsock/vsock_addr.h
index cdfbcefdf843..9ccd5316eac0 100644
--- a/net/vmw_vsock/vsock_addr.h
+++ b/net/vmw_vsock/vsock_addr.h
@@ -24,8 +24,6 @@ bool vsock_addr_bound(const struct sockaddr_vm *addr);
24void vsock_addr_unbind(struct sockaddr_vm *addr); 24void vsock_addr_unbind(struct sockaddr_vm *addr);
25bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, 25bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
26 const struct sockaddr_vm *other); 26 const struct sockaddr_vm *other);
27bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr,
28 const struct sockaddr_vm *other);
29int vsock_addr_cast(const struct sockaddr *addr, size_t len, 27int vsock_addr_cast(const struct sockaddr *addr, size_t len,
30 struct sockaddr_vm **out_addr); 28 struct sockaddr_vm **out_addr);
31 29
diff --git a/net/wireless/core.c b/net/wireless/core.c
index ea4155fe9733..6ddf74f0ae1e 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -212,6 +212,39 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
212 rdev_rfkill_poll(rdev); 212 rdev_rfkill_poll(rdev);
213} 213}
214 214
215void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
216 struct wireless_dev *wdev)
217{
218 lockdep_assert_held(&rdev->devlist_mtx);
219 lockdep_assert_held(&rdev->sched_scan_mtx);
220
221 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE))
222 return;
223
224 if (!wdev->p2p_started)
225 return;
226
227 rdev_stop_p2p_device(rdev, wdev);
228 wdev->p2p_started = false;
229
230 rdev->opencount--;
231
232 if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
233 bool busy = work_busy(&rdev->scan_done_wk);
234
235 /*
236 * If the work isn't pending or running (in which case it would
237 * be waiting for the lock we hold) the driver didn't properly
238 * cancel the scan when the interface was removed. In this case
239 * warn and leak the scan request object to not crash later.
240 */
241 WARN_ON(!busy);
242
243 rdev->scan_req->aborted = true;
244 ___cfg80211_scan_done(rdev, !busy);
245 }
246}
247
215static int cfg80211_rfkill_set_block(void *data, bool blocked) 248static int cfg80211_rfkill_set_block(void *data, bool blocked)
216{ 249{
217 struct cfg80211_registered_device *rdev = data; 250 struct cfg80211_registered_device *rdev = data;
@@ -221,7 +254,8 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
221 return 0; 254 return 0;
222 255
223 rtnl_lock(); 256 rtnl_lock();
224 mutex_lock(&rdev->devlist_mtx); 257
258 /* read-only iteration need not hold the devlist_mtx */
225 259
226 list_for_each_entry(wdev, &rdev->wdev_list, list) { 260 list_for_each_entry(wdev, &rdev->wdev_list, list) {
227 if (wdev->netdev) { 261 if (wdev->netdev) {
@@ -231,18 +265,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
231 /* otherwise, check iftype */ 265 /* otherwise, check iftype */
232 switch (wdev->iftype) { 266 switch (wdev->iftype) {
233 case NL80211_IFTYPE_P2P_DEVICE: 267 case NL80211_IFTYPE_P2P_DEVICE:
234 if (!wdev->p2p_started) 268 /* but this requires it */
235 break; 269 mutex_lock(&rdev->devlist_mtx);
236 rdev_stop_p2p_device(rdev, wdev); 270 mutex_lock(&rdev->sched_scan_mtx);
237 wdev->p2p_started = false; 271 cfg80211_stop_p2p_device(rdev, wdev);
238 rdev->opencount--; 272 mutex_unlock(&rdev->sched_scan_mtx);
273 mutex_unlock(&rdev->devlist_mtx);
239 break; 274 break;
240 default: 275 default:
241 break; 276 break;
242 } 277 }
243 } 278 }
244 279
245 mutex_unlock(&rdev->devlist_mtx);
246 rtnl_unlock(); 280 rtnl_unlock();
247 281
248 return 0; 282 return 0;
@@ -745,17 +779,13 @@ static void wdev_cleanup_work(struct work_struct *work)
745 wdev = container_of(work, struct wireless_dev, cleanup_work); 779 wdev = container_of(work, struct wireless_dev, cleanup_work);
746 rdev = wiphy_to_dev(wdev->wiphy); 780 rdev = wiphy_to_dev(wdev->wiphy);
747 781
748 cfg80211_lock_rdev(rdev); 782 mutex_lock(&rdev->sched_scan_mtx);
749 783
750 if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { 784 if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
751 rdev->scan_req->aborted = true; 785 rdev->scan_req->aborted = true;
752 ___cfg80211_scan_done(rdev, true); 786 ___cfg80211_scan_done(rdev, true);
753 } 787 }
754 788
755 cfg80211_unlock_rdev(rdev);
756
757 mutex_lock(&rdev->sched_scan_mtx);
758
759 if (WARN_ON(rdev->sched_scan_req && 789 if (WARN_ON(rdev->sched_scan_req &&
760 rdev->sched_scan_req->dev == wdev->netdev)) { 790 rdev->sched_scan_req->dev == wdev->netdev)) {
761 __cfg80211_stop_sched_scan(rdev, false); 791 __cfg80211_stop_sched_scan(rdev, false);
@@ -781,21 +811,19 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
781 return; 811 return;
782 812
783 mutex_lock(&rdev->devlist_mtx); 813 mutex_lock(&rdev->devlist_mtx);
814 mutex_lock(&rdev->sched_scan_mtx);
784 list_del_rcu(&wdev->list); 815 list_del_rcu(&wdev->list);
785 rdev->devlist_generation++; 816 rdev->devlist_generation++;
786 817
787 switch (wdev->iftype) { 818 switch (wdev->iftype) {
788 case NL80211_IFTYPE_P2P_DEVICE: 819 case NL80211_IFTYPE_P2P_DEVICE:
789 if (!wdev->p2p_started) 820 cfg80211_stop_p2p_device(rdev, wdev);
790 break;
791 rdev_stop_p2p_device(rdev, wdev);
792 wdev->p2p_started = false;
793 rdev->opencount--;
794 break; 821 break;
795 default: 822 default:
796 WARN_ON_ONCE(1); 823 WARN_ON_ONCE(1);
797 break; 824 break;
798 } 825 }
826 mutex_unlock(&rdev->sched_scan_mtx);
799 mutex_unlock(&rdev->devlist_mtx); 827 mutex_unlock(&rdev->devlist_mtx);
800} 828}
801EXPORT_SYMBOL(cfg80211_unregister_wdev); 829EXPORT_SYMBOL(cfg80211_unregister_wdev);
@@ -936,6 +964,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
936 cfg80211_update_iface_num(rdev, wdev->iftype, 1); 964 cfg80211_update_iface_num(rdev, wdev->iftype, 1);
937 cfg80211_lock_rdev(rdev); 965 cfg80211_lock_rdev(rdev);
938 mutex_lock(&rdev->devlist_mtx); 966 mutex_lock(&rdev->devlist_mtx);
967 mutex_lock(&rdev->sched_scan_mtx);
939 wdev_lock(wdev); 968 wdev_lock(wdev);
940 switch (wdev->iftype) { 969 switch (wdev->iftype) {
941#ifdef CONFIG_CFG80211_WEXT 970#ifdef CONFIG_CFG80211_WEXT
@@ -967,6 +996,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
967 break; 996 break;
968 } 997 }
969 wdev_unlock(wdev); 998 wdev_unlock(wdev);
999 mutex_unlock(&rdev->sched_scan_mtx);
970 rdev->opencount++; 1000 rdev->opencount++;
971 mutex_unlock(&rdev->devlist_mtx); 1001 mutex_unlock(&rdev->devlist_mtx);
972 cfg80211_unlock_rdev(rdev); 1002 cfg80211_unlock_rdev(rdev);
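
The new cfg80211_stop_p2p_device() helper folds three nearly identical teardown sequences (the rfkill block path, wdev unregistration, and the nl80211 stop command further down) into one place, and states its locking contract with lockdep_assert_held() rather than comments alone. Taking the rfkill call site above as the model, callers look like:

    mutex_lock(&rdev->devlist_mtx);
    mutex_lock(&rdev->sched_scan_mtx);
    cfg80211_stop_p2p_device(rdev, wdev);
    mutex_unlock(&rdev->sched_scan_mtx);
    mutex_unlock(&rdev->devlist_mtx);

If a caller forgets either mutex, lockdep reports it immediately instead of the scan state silently racing.
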
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 3aec0e429d8a..5845c2b37aa8 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -503,6 +503,9 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
503void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, 503void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
504 enum nl80211_iftype iftype, int num); 504 enum nl80211_iftype iftype, int num);
505 505
506void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
507 struct wireless_dev *wdev);
508
506#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 509#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
507 510
508#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS 511#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d44ab216c0ec..58e13a8c95f9 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4702,14 +4702,19 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
4702 if (!rdev->ops->scan) 4702 if (!rdev->ops->scan)
4703 return -EOPNOTSUPP; 4703 return -EOPNOTSUPP;
4704 4704
4705 if (rdev->scan_req) 4705 mutex_lock(&rdev->sched_scan_mtx);
4706 return -EBUSY; 4706 if (rdev->scan_req) {
4707 err = -EBUSY;
4708 goto unlock;
4709 }
4707 4710
4708 if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { 4711 if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
4709 n_channels = validate_scan_freqs( 4712 n_channels = validate_scan_freqs(
4710 info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); 4713 info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
4711 if (!n_channels) 4714 if (!n_channels) {
4712 return -EINVAL; 4715 err = -EINVAL;
4716 goto unlock;
4717 }
4713 } else { 4718 } else {
4714 enum ieee80211_band band; 4719 enum ieee80211_band band;
4715 n_channels = 0; 4720 n_channels = 0;
@@ -4723,23 +4728,29 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
4723 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) 4728 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp)
4724 n_ssids++; 4729 n_ssids++;
4725 4730
4726 if (n_ssids > wiphy->max_scan_ssids) 4731 if (n_ssids > wiphy->max_scan_ssids) {
4727 return -EINVAL; 4732 err = -EINVAL;
4733 goto unlock;
4734 }
4728 4735
4729 if (info->attrs[NL80211_ATTR_IE]) 4736 if (info->attrs[NL80211_ATTR_IE])
4730 ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); 4737 ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
4731 else 4738 else
4732 ie_len = 0; 4739 ie_len = 0;
4733 4740
4734 if (ie_len > wiphy->max_scan_ie_len) 4741 if (ie_len > wiphy->max_scan_ie_len) {
4735 return -EINVAL; 4742 err = -EINVAL;
4743 goto unlock;
4744 }
4736 4745
4737 request = kzalloc(sizeof(*request) 4746 request = kzalloc(sizeof(*request)
4738 + sizeof(*request->ssids) * n_ssids 4747 + sizeof(*request->ssids) * n_ssids
4739 + sizeof(*request->channels) * n_channels 4748 + sizeof(*request->channels) * n_channels
4740 + ie_len, GFP_KERNEL); 4749 + ie_len, GFP_KERNEL);
4741 if (!request) 4750 if (!request) {
4742 return -ENOMEM; 4751 err = -ENOMEM;
4752 goto unlock;
4753 }
4743 4754
4744 if (n_ssids) 4755 if (n_ssids)
4745 request->ssids = (void *)&request->channels[n_channels]; 4756 request->ssids = (void *)&request->channels[n_channels];
@@ -4876,6 +4887,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
4876 kfree(request); 4887 kfree(request);
4877 } 4888 }
4878 4889
4890 unlock:
4891 mutex_unlock(&rdev->sched_scan_mtx);
4879 return err; 4892 return err;
4880} 4893}
4881 4894
@@ -7749,20 +7762,9 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
7749 if (!rdev->ops->stop_p2p_device) 7762 if (!rdev->ops->stop_p2p_device)
7750 return -EOPNOTSUPP; 7763 return -EOPNOTSUPP;
7751 7764
7752 if (!wdev->p2p_started) 7765 mutex_lock(&rdev->sched_scan_mtx);
7753 return 0; 7766 cfg80211_stop_p2p_device(rdev, wdev);
7754 7767 mutex_unlock(&rdev->sched_scan_mtx);
7755 rdev_stop_p2p_device(rdev, wdev);
7756 wdev->p2p_started = false;
7757
7758 mutex_lock(&rdev->devlist_mtx);
7759 rdev->opencount--;
7760 mutex_unlock(&rdev->devlist_mtx);
7761
7762 if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
7763 rdev->scan_req->aborted = true;
7764 ___cfg80211_scan_done(rdev, true);
7765 }
7766 7768
7767 return 0; 7769 return 0;
7768} 7770}
@@ -8486,7 +8488,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
8486 struct nlattr *nest; 8488 struct nlattr *nest;
8487 int i; 8489 int i;
8488 8490
8489 ASSERT_RDEV_LOCK(rdev); 8491 lockdep_assert_held(&rdev->sched_scan_mtx);
8490 8492
8491 if (WARN_ON(!req)) 8493 if (WARN_ON(!req))
8492 return 0; 8494 return 0;
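
With rdev->sched_scan_mtx now held across nl80211_trigger_scan(), every early `return -E...` has to become an error assignment plus a jump to a single unlock label, otherwise the mutex would leak on the error paths. The skeleton of that conversion, with placeholder names:

    static int example_trigger(struct example_dev *dev)
    {
            int err = 0;

            mutex_lock(&dev->mtx);

            if (dev->busy) {
                    err = -EBUSY;
                    goto unlock;    /* no bare return while the mutex is held */
            }

            /* ... the actual work ... */

    unlock:
            mutex_unlock(&dev->mtx);
            return err;
    }
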
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 674aadca0079..fd99ea495b7e 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -169,7 +169,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
169 union iwreq_data wrqu; 169 union iwreq_data wrqu;
170#endif 170#endif
171 171
172 ASSERT_RDEV_LOCK(rdev); 172 lockdep_assert_held(&rdev->sched_scan_mtx);
173 173
174 request = rdev->scan_req; 174 request = rdev->scan_req;
175 175
@@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk)
230 rdev = container_of(wk, struct cfg80211_registered_device, 230 rdev = container_of(wk, struct cfg80211_registered_device,
231 scan_done_wk); 231 scan_done_wk);
232 232
233 cfg80211_lock_rdev(rdev); 233 mutex_lock(&rdev->sched_scan_mtx);
234 ___cfg80211_scan_done(rdev, false); 234 ___cfg80211_scan_done(rdev, false);
235 cfg80211_unlock_rdev(rdev); 235 mutex_unlock(&rdev->sched_scan_mtx);
236} 236}
237 237
238void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) 238void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
@@ -698,11 +698,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
698 found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR); 698 found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR);
699 699
700 if (found) { 700 if (found) {
701 found->pub.beacon_interval = tmp->pub.beacon_interval;
702 found->pub.signal = tmp->pub.signal;
703 found->pub.capability = tmp->pub.capability;
704 found->ts = tmp->ts;
705
706 /* Update IEs */ 701 /* Update IEs */
707 if (rcu_access_pointer(tmp->pub.proberesp_ies)) { 702 if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
708 const struct cfg80211_bss_ies *old; 703 const struct cfg80211_bss_ies *old;
@@ -723,6 +718,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
723 718
724 if (found->pub.hidden_beacon_bss && 719 if (found->pub.hidden_beacon_bss &&
725 !list_empty(&found->hidden_list)) { 720 !list_empty(&found->hidden_list)) {
721 const struct cfg80211_bss_ies *f;
722
726 /* 723 /*
727 * The found BSS struct is one of the probe 724 * The found BSS struct is one of the probe
728 * response members of a group, but we're 725 * response members of a group, but we're
@@ -732,6 +729,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
732 * SSID to showing it, which is confusing so 729 * SSID to showing it, which is confusing so
733 * drop this information. 730 * drop this information.
734 */ 731 */
732
733 f = rcu_access_pointer(tmp->pub.beacon_ies);
734 kfree_rcu((struct cfg80211_bss_ies *)f,
735 rcu_head);
735 goto drop; 736 goto drop;
736 } 737 }
737 738
@@ -761,6 +762,11 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
761 kfree_rcu((struct cfg80211_bss_ies *)old, 762 kfree_rcu((struct cfg80211_bss_ies *)old,
762 rcu_head); 763 rcu_head);
763 } 764 }
765
766 found->pub.beacon_interval = tmp->pub.beacon_interval;
767 found->pub.signal = tmp->pub.signal;
768 found->pub.capability = tmp->pub.capability;
769 found->ts = tmp->ts;
764 } else { 770 } else {
765 struct cfg80211_internal_bss *new; 771 struct cfg80211_internal_bss *new;
766 struct cfg80211_internal_bss *hidden; 772 struct cfg80211_internal_bss *hidden;
@@ -1056,6 +1062,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
1056 if (IS_ERR(rdev)) 1062 if (IS_ERR(rdev))
1057 return PTR_ERR(rdev); 1063 return PTR_ERR(rdev);
1058 1064
1065 mutex_lock(&rdev->sched_scan_mtx);
1059 if (rdev->scan_req) { 1066 if (rdev->scan_req) {
1060 err = -EBUSY; 1067 err = -EBUSY;
1061 goto out; 1068 goto out;
@@ -1162,6 +1169,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
1162 dev_hold(dev); 1169 dev_hold(dev);
1163 } 1170 }
1164 out: 1171 out:
1172 mutex_unlock(&rdev->sched_scan_mtx);
1165 kfree(creq); 1173 kfree(creq);
1166 cfg80211_unlock_rdev(rdev); 1174 cfg80211_unlock_rdev(rdev);
1167 return err; 1175 return err;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f432bd3755b1..482c70e70127 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -85,6 +85,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
85 ASSERT_RTNL(); 85 ASSERT_RTNL();
86 ASSERT_RDEV_LOCK(rdev); 86 ASSERT_RDEV_LOCK(rdev);
87 ASSERT_WDEV_LOCK(wdev); 87 ASSERT_WDEV_LOCK(wdev);
88 lockdep_assert_held(&rdev->sched_scan_mtx);
88 89
89 if (rdev->scan_req) 90 if (rdev->scan_req)
90 return -EBUSY; 91 return -EBUSY;
@@ -223,6 +224,7 @@ void cfg80211_conn_work(struct work_struct *work)
223 rtnl_lock(); 224 rtnl_lock();
224 cfg80211_lock_rdev(rdev); 225 cfg80211_lock_rdev(rdev);
225 mutex_lock(&rdev->devlist_mtx); 226 mutex_lock(&rdev->devlist_mtx);
227 mutex_lock(&rdev->sched_scan_mtx);
226 228
227 list_for_each_entry(wdev, &rdev->wdev_list, list) { 229 list_for_each_entry(wdev, &rdev->wdev_list, list) {
228 wdev_lock(wdev); 230 wdev_lock(wdev);
@@ -247,6 +249,7 @@ void cfg80211_conn_work(struct work_struct *work)
247 wdev_unlock(wdev); 249 wdev_unlock(wdev);
248 } 250 }
249 251
252 mutex_unlock(&rdev->sched_scan_mtx);
250 mutex_unlock(&rdev->devlist_mtx); 253 mutex_unlock(&rdev->devlist_mtx);
251 cfg80211_unlock_rdev(rdev); 254 cfg80211_unlock_rdev(rdev);
252 rtnl_unlock(); 255 rtnl_unlock();
@@ -320,11 +323,9 @@ void cfg80211_sme_scan_done(struct net_device *dev)
320{ 323{
321 struct wireless_dev *wdev = dev->ieee80211_ptr; 324 struct wireless_dev *wdev = dev->ieee80211_ptr;
322 325
323 mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);
324 wdev_lock(wdev); 326 wdev_lock(wdev);
325 __cfg80211_sme_scan_done(dev); 327 __cfg80211_sme_scan_done(dev);
326 wdev_unlock(wdev); 328 wdev_unlock(wdev);
327 mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);
328} 329}
329 330
330void cfg80211_sme_rx_auth(struct net_device *dev, 331void cfg80211_sme_rx_auth(struct net_device *dev,
@@ -924,9 +925,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
924 int err; 925 int err;
925 926
926 mutex_lock(&rdev->devlist_mtx); 927 mutex_lock(&rdev->devlist_mtx);
928 /* might request scan - scan_mtx -> wdev_mtx dependency */
929 mutex_lock(&rdev->sched_scan_mtx);
927 wdev_lock(dev->ieee80211_ptr); 930 wdev_lock(dev->ieee80211_ptr);
928 err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); 931 err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL);
929 wdev_unlock(dev->ieee80211_ptr); 932 wdev_unlock(dev->ieee80211_ptr);
933 mutex_unlock(&rdev->sched_scan_mtx);
930 mutex_unlock(&rdev->devlist_mtx); 934 mutex_unlock(&rdev->devlist_mtx);
931 935
932 return err; 936 return err;
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index b7a531380e19..7586de77a2f8 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -27,7 +27,8 @@
27#define WIPHY_PR_ARG __entry->wiphy_name 27#define WIPHY_PR_ARG __entry->wiphy_name
28 28
29#define WDEV_ENTRY __field(u32, id) 29#define WDEV_ENTRY __field(u32, id)
30#define WDEV_ASSIGN (__entry->id) = (wdev ? wdev->identifier : 0) 30#define WDEV_ASSIGN (__entry->id) = (!IS_ERR_OR_NULL(wdev) \
31 ? wdev->identifier : 0)
31#define WDEV_PR_FMT "wdev(%u)" 32#define WDEV_PR_FMT "wdev(%u)"
32#define WDEV_PR_ARG (__entry->id) 33#define WDEV_PR_ARG (__entry->id)
33 34
@@ -1778,7 +1779,7 @@ TRACE_EVENT(rdev_set_mac_acl,
1778 ), 1779 ),
1779 TP_fast_assign( 1780 TP_fast_assign(
1780 WIPHY_ASSIGN; 1781 WIPHY_ASSIGN;
1781 WIPHY_ASSIGN; 1782 NETDEV_ASSIGN;
1782 __entry->acl_policy = params->acl_policy; 1783 __entry->acl_policy = params->acl_policy;
1783 ), 1784 ),
1784 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d", 1785 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d",
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index fb9622f6d99c..e79cb5c0655a 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -89,6 +89,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
89 89
90 cfg80211_lock_rdev(rdev); 90 cfg80211_lock_rdev(rdev);
91 mutex_lock(&rdev->devlist_mtx); 91 mutex_lock(&rdev->devlist_mtx);
92 mutex_lock(&rdev->sched_scan_mtx);
92 wdev_lock(wdev); 93 wdev_lock(wdev);
93 94
94 if (wdev->sme_state != CFG80211_SME_IDLE) { 95 if (wdev->sme_state != CFG80211_SME_IDLE) {
@@ -135,6 +136,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
135 err = cfg80211_mgd_wext_connect(rdev, wdev); 136 err = cfg80211_mgd_wext_connect(rdev, wdev);
136 out: 137 out:
137 wdev_unlock(wdev); 138 wdev_unlock(wdev);
139 mutex_unlock(&rdev->sched_scan_mtx);
138 mutex_unlock(&rdev->devlist_mtx); 140 mutex_unlock(&rdev->devlist_mtx);
139 cfg80211_unlock_rdev(rdev); 141 cfg80211_unlock_rdev(rdev);
140 return err; 142 return err;
@@ -190,6 +192,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
190 192
191 cfg80211_lock_rdev(rdev); 193 cfg80211_lock_rdev(rdev);
192 mutex_lock(&rdev->devlist_mtx); 194 mutex_lock(&rdev->devlist_mtx);
195 mutex_lock(&rdev->sched_scan_mtx);
193 wdev_lock(wdev); 196 wdev_lock(wdev);
194 197
195 err = 0; 198 err = 0;
@@ -223,6 +226,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
223 err = cfg80211_mgd_wext_connect(rdev, wdev); 226 err = cfg80211_mgd_wext_connect(rdev, wdev);
224 out: 227 out:
225 wdev_unlock(wdev); 228 wdev_unlock(wdev);
229 mutex_unlock(&rdev->sched_scan_mtx);
226 mutex_unlock(&rdev->devlist_mtx); 230 mutex_unlock(&rdev->devlist_mtx);
227 cfg80211_unlock_rdev(rdev); 231 cfg80211_unlock_rdev(rdev);
228 return err; 232 return err;
@@ -285,6 +289,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
285 289
286 cfg80211_lock_rdev(rdev); 290 cfg80211_lock_rdev(rdev);
287 mutex_lock(&rdev->devlist_mtx); 291 mutex_lock(&rdev->devlist_mtx);
292 mutex_lock(&rdev->sched_scan_mtx);
288 wdev_lock(wdev); 293 wdev_lock(wdev);
289 294
290 if (wdev->sme_state != CFG80211_SME_IDLE) { 295 if (wdev->sme_state != CFG80211_SME_IDLE) {
@@ -313,6 +318,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
313 err = cfg80211_mgd_wext_connect(rdev, wdev); 318 err = cfg80211_mgd_wext_connect(rdev, wdev);
314 out: 319 out:
315 wdev_unlock(wdev); 320 wdev_unlock(wdev);
321 mutex_unlock(&rdev->sched_scan_mtx);
316 mutex_unlock(&rdev->devlist_mtx); 322 mutex_unlock(&rdev->devlist_mtx);
317 cfg80211_unlock_rdev(rdev); 323 cfg80211_unlock_rdev(rdev);
318 return err; 324 return err;
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 35754cc8a9e5..8dafe6d3c6e4 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -334,6 +334,70 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
334 x->xflags &= ~XFRM_TIME_DEFER; 334 x->xflags &= ~XFRM_TIME_DEFER;
335} 335}
336 336
337static void xfrm_replay_notify_esn(struct xfrm_state *x, int event)
338{
339 u32 seq_diff, oseq_diff;
340 struct km_event c;
341 struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
342 struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn;
343
344 /* we send notify messages in case
345 * 1. we updated on of the sequence numbers, and the seqno difference
346 * is at least x->replay_maxdiff, in this case we also update the
347 * timeout of our timer function
348 * 2. if x->replay_maxage has elapsed since last update,
349 * and there were changes
350 *
351 * The state structure must be locked!
352 */
353
354 switch (event) {
355 case XFRM_REPLAY_UPDATE:
356 if (!x->replay_maxdiff)
357 break;
358
359 if (replay_esn->seq_hi == preplay_esn->seq_hi)
360 seq_diff = replay_esn->seq - preplay_esn->seq;
361 else
362 seq_diff = ~preplay_esn->seq + replay_esn->seq + 1;
363
364 if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
365 oseq_diff = replay_esn->oseq - preplay_esn->oseq;
366 else
367 oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1;
368
369 if (seq_diff < x->replay_maxdiff &&
370 oseq_diff < x->replay_maxdiff) {
371
372 if (x->xflags & XFRM_TIME_DEFER)
373 event = XFRM_REPLAY_TIMEOUT;
374 else
375 return;
376 }
377
378 break;
379
380 case XFRM_REPLAY_TIMEOUT:
381 if (memcmp(x->replay_esn, x->preplay_esn,
382 xfrm_replay_state_esn_len(replay_esn)) == 0) {
383 x->xflags |= XFRM_TIME_DEFER;
384 return;
385 }
386
387 break;
388 }
389
390 memcpy(x->preplay_esn, x->replay_esn,
391 xfrm_replay_state_esn_len(replay_esn));
392 c.event = XFRM_MSG_NEWAE;
393 c.data.aevent = event;
394 km_state_notify(x, &c);
395
396 if (x->replay_maxage &&
397 !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
398 x->xflags &= ~XFRM_TIME_DEFER;
399}
400
337static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb) 401static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)
338{ 402{
339 int err = 0; 403 int err = 0;
@@ -510,7 +574,7 @@ static struct xfrm_replay xfrm_replay_esn = {
510 .advance = xfrm_replay_advance_esn, 574 .advance = xfrm_replay_advance_esn,
511 .check = xfrm_replay_check_esn, 575 .check = xfrm_replay_check_esn,
512 .recheck = xfrm_replay_recheck_esn, 576 .recheck = xfrm_replay_recheck_esn,
513 .notify = xfrm_replay_notify_bmp, 577 .notify = xfrm_replay_notify_esn,
514 .overflow = xfrm_replay_overflow_esn, 578 .overflow = xfrm_replay_overflow_esn,
515}; 579};
516 580
diff --git a/security/capability.c b/security/capability.c
index 579775088967..6783c3e6c88e 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -737,6 +737,11 @@ static int cap_tun_dev_open(void *security)
737{ 737{
738 return 0; 738 return 0;
739} 739}
740
741static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk)
742{
743}
744
740#endif /* CONFIG_SECURITY_NETWORK */ 745#endif /* CONFIG_SECURITY_NETWORK */
741 746
742#ifdef CONFIG_SECURITY_NETWORK_XFRM 747#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1071,6 +1076,7 @@ void __init security_fixup_ops(struct security_operations *ops)
1071 set_to_cap_if_null(ops, tun_dev_open); 1076 set_to_cap_if_null(ops, tun_dev_open);
1072 set_to_cap_if_null(ops, tun_dev_attach_queue); 1077 set_to_cap_if_null(ops, tun_dev_attach_queue);
1073 set_to_cap_if_null(ops, tun_dev_attach); 1078 set_to_cap_if_null(ops, tun_dev_attach);
1079 set_to_cap_if_null(ops, skb_owned_by);
1074#endif /* CONFIG_SECURITY_NETWORK */ 1080#endif /* CONFIG_SECURITY_NETWORK */
1075#ifdef CONFIG_SECURITY_NETWORK_XFRM 1081#ifdef CONFIG_SECURITY_NETWORK_XFRM
1076 set_to_cap_if_null(ops, xfrm_policy_alloc_security); 1082 set_to_cap_if_null(ops, xfrm_policy_alloc_security);
diff --git a/security/security.c b/security/security.c
index 7b88c6aeaed4..03f248b84e9f 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1290,6 +1290,11 @@ int security_tun_dev_open(void *security)
1290} 1290}
1291EXPORT_SYMBOL(security_tun_dev_open); 1291EXPORT_SYMBOL(security_tun_dev_open);
1292 1292
1293void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
1294{
1295 security_ops->skb_owned_by(skb, sk);
1296}
1297
1293#endif /* CONFIG_SECURITY_NETWORK */ 1298#endif /* CONFIG_SECURITY_NETWORK */
1294 1299
1295#ifdef CONFIG_SECURITY_NETWORK_XFRM 1300#ifdef CONFIG_SECURITY_NETWORK_XFRM
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 2fa28c88900c..7171a957b933 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -51,6 +51,7 @@
51#include <linux/tty.h> 51#include <linux/tty.h>
52#include <net/icmp.h> 52#include <net/icmp.h>
53#include <net/ip.h> /* for local_port_range[] */ 53#include <net/ip.h> /* for local_port_range[] */
54#include <net/sock.h>
54#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ 55#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
55#include <net/net_namespace.h> 56#include <net/net_namespace.h>
56#include <net/netlabel.h> 57#include <net/netlabel.h>
@@ -4363,6 +4364,11 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
4363 selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid); 4364 selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
4364} 4365}
4365 4366
4367static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk)
4368{
4369 skb_set_owner_w(skb, sk);
4370}
4371
4366static int selinux_secmark_relabel_packet(u32 sid) 4372static int selinux_secmark_relabel_packet(u32 sid)
4367{ 4373{
4368 const struct task_security_struct *__tsec; 4374 const struct task_security_struct *__tsec;
@@ -5664,6 +5670,7 @@ static struct security_operations selinux_ops = {
5664 .tun_dev_attach_queue = selinux_tun_dev_attach_queue, 5670 .tun_dev_attach_queue = selinux_tun_dev_attach_queue,
5665 .tun_dev_attach = selinux_tun_dev_attach, 5671 .tun_dev_attach = selinux_tun_dev_attach,
5666 .tun_dev_open = selinux_tun_dev_open, 5672 .tun_dev_open = selinux_tun_dev_open,
5673 .skb_owned_by = selinux_skb_owned_by,
5667 5674
5668#ifdef CONFIG_SECURITY_NETWORK_XFRM 5675#ifdef CONFIG_SECURITY_NETWORK_XFRM
5669 .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc, 5676 .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 23414b93771f..13c88fbcf037 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -347,10 +347,8 @@ int yama_ptrace_traceme(struct task_struct *parent)
347 /* Only disallow PTRACE_TRACEME on more aggressive settings. */ 347 /* Only disallow PTRACE_TRACEME on more aggressive settings. */
348 switch (ptrace_scope) { 348 switch (ptrace_scope) {
349 case YAMA_SCOPE_CAPABILITY: 349 case YAMA_SCOPE_CAPABILITY:
350 rcu_read_lock(); 350 if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
351 if (!ns_capable(__task_cred(parent)->user_ns, CAP_SYS_PTRACE))
352 rc = -EPERM; 351 rc = -EPERM;
353 rcu_read_unlock();
354 break; 352 break;
355 case YAMA_SCOPE_NO_ATTACH: 353 case YAMA_SCOPE_NO_ATTACH:
356 rc = -EPERM; 354 rc = -EPERM;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index ecdf30eb5879..4aba7646dd9c 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -173,7 +173,7 @@ const char *snd_hda_get_jack_type(u32 cfg)
173 "Line Out", "Speaker", "HP Out", "CD", 173 "Line Out", "Speaker", "HP Out", "CD",
174 "SPDIF Out", "Digital Out", "Modem Line", "Modem Hand", 174 "SPDIF Out", "Digital Out", "Modem Line", "Modem Hand",
175 "Line In", "Aux", "Mic", "Telephony", 175 "Line In", "Aux", "Mic", "Telephony",
176 "SPDIF In", "Digitial In", "Reserved", "Other" 176 "SPDIF In", "Digital In", "Reserved", "Other"
177 }; 177 };
178 178
179 return jack_types[(cfg & AC_DEFCFG_DEVICE) 179 return jack_types[(cfg & AC_DEFCFG_DEVICE)
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 7dd846380a50..d0d7ac1e99d2 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -320,7 +320,7 @@ int snd_hdmi_get_eld(struct hda_codec *codec, hda_nid_t nid,
320 unsigned char *buf, int *eld_size) 320 unsigned char *buf, int *eld_size)
321{ 321{
322 int i; 322 int i;
323 int ret; 323 int ret = 0;
324 int size; 324 int size;
325 325
326 /* 326 /*
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 43c2ea539561..2dbe767be16b 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -740,7 +740,7 @@ EXPORT_SYMBOL_HDA(snd_hda_activate_path);
740static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path) 740static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path)
741{ 741{
742 struct hda_gen_spec *spec = codec->spec; 742 struct hda_gen_spec *spec = codec->spec;
743 bool changed; 743 bool changed = false;
744 int i; 744 int i;
745 745
746 if (!spec->power_down_unused || path->active) 746 if (!spec->power_down_unused || path->active)
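
The two HDA hunks above (hda_eld.c and hda_generic.c) address the same class of problem: a local that is only assigned inside a conditional or loop body now gets a safe initial value, so the function returns something defined even when no branch fires. In miniature, with an illustrative helper:

    static int count_changes(const bool *active, int n)
    {
            bool changed = false;   /* defined even if the loop never sets it */
            int i;

            for (i = 0; i < n; i++)
                    if (active[i])
                            changed = true;

            return changed ? 1 : 0;
    }
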
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 418bfc0eb0a3..bcd40ee488e3 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -134,8 +134,8 @@ MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
134 * this may give more power-saving, but will take longer time to 134 * this may give more power-saving, but will take longer time to
135 * wake up. 135 * wake up.
136 */ 136 */
137static int power_save_controller = -1; 137static bool power_save_controller = 1;
138module_param(power_save_controller, bint, 0644); 138module_param(power_save_controller, bool, 0644);
139MODULE_PARM_DESC(power_save_controller, "Reset controller in power save mode."); 139MODULE_PARM_DESC(power_save_controller, "Reset controller in power save mode.");
140#endif /* CONFIG_PM */ 140#endif /* CONFIG_PM */
141 141
@@ -2931,8 +2931,6 @@ static int azx_runtime_idle(struct device *dev)
2931 struct snd_card *card = dev_get_drvdata(dev); 2931 struct snd_card *card = dev_get_drvdata(dev);
2932 struct azx *chip = card->private_data; 2932 struct azx *chip = card->private_data;
2933 2933
2934 if (power_save_controller > 0)
2935 return 0;
2936 if (!power_save_controller || 2934 if (!power_save_controller ||
2937 !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) 2935 !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
2938 return -EBUSY; 2936 return -EBUSY;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 78e1827d0a95..de8ac5c07fd0 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1196,7 +1196,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
1196 1196
1197 _snd_printd(SND_PR_VERBOSE, 1197 _snd_printd(SND_PR_VERBOSE,
1198 "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", 1198 "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
1199 codec->addr, pin_nid, eld->monitor_present, eld->eld_valid); 1199 codec->addr, pin_nid, pin_eld->monitor_present, eld->eld_valid);
1200 1200
1201 if (eld->eld_valid) { 1201 if (eld->eld_valid) {
1202 if (snd_hdmi_get_eld(codec, pin_nid, eld->eld_buffer, 1202 if (snd_hdmi_get_eld(codec, pin_nid, eld->eld_buffer,
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 563c24df4d6f..f15c36bde540 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3440,7 +3440,8 @@ static int alc662_parse_auto_config(struct hda_codec *codec)
         const hda_nid_t *ssids;
 
         if (codec->vendor_id == 0x10ec0272 || codec->vendor_id == 0x10ec0663 ||
-            codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670)
+            codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670 ||
+            codec->vendor_id == 0x10ec0671)
                 ssids = alc663_ssids;
         else
                 ssids = alc662_ssids;
@@ -3894,6 +3895,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
         { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 },
         { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 },
         { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
+        { .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 },
         { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
         { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
         { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
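
The two Realtek hunks follow the usual HDA pattern: a new vendor ID is supported by extending an ID check and appending one entry to a preset table that reuses an existing patch routine. Below is a minimal userspace sketch of that table-driven dispatch; the table, the patch functions and their behaviour are invented for illustration and are not the real snd_hda_preset_realtek list.

#include <stdio.h>

struct preset {
        unsigned int id;
        const char *name;
        int (*patch)(void);
};

static int patch_alc662(void) { puts("applying ALC662-family setup"); return 0; }
static int patch_alc680(void) { puts("applying ALC680 setup"); return 0; }

static const struct preset presets[] = {
        { 0x10ec0670, "ALC670", patch_alc662 },
        { 0x10ec0671, "ALC671", patch_alc662 },  /* new ID, same patch routine */
        { 0x10ec0680, "ALC680", patch_alc680 },
        { 0 }                                    /* table terminator */
};

int main(void)
{
        unsigned int vendor_id = 0x10ec0671;

        for (const struct preset *p = presets; p->id; p++)
                if (p->id == vendor_id) {
                        printf("matched %s\n", p->name);
                        return p->patch();
                }
        puts("no preset, using generic parser");
        return 0;
}
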
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index fc176044994d..fc176044994d 100755..100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h
index 7e103f249053..7e103f249053 100755..100644
--- a/sound/soc/codecs/max98090.h
+++ b/sound/soc/codecs/max98090.h
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index f2d61a187830..566ea3256e2d 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -159,6 +159,7 @@ static int si476x_codec_hw_params(struct snd_pcm_substream *substream,
         switch (params_format(params)) {
         case SNDRV_PCM_FORMAT_S8:
                 width = SI476X_PCM_FORMAT_S8;
+                break;
         case SNDRV_PCM_FORMAT_S16_LE:
                 width = SI476X_PCM_FORMAT_S16_LE;
                 break;
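
The si476x hunk adds a missing break, the classic switch fall-through bug: without it the S8 case falls into the S16 case and silently picks the wrong sample width. A self-contained sketch of the same mistake and fix, with invented stand-in constants rather than the real SNDRV/SI476X values:

#include <stdio.h>

enum fmt { FMT_S8, FMT_S16_LE };

static int pcm_width(enum fmt f)
{
        int width = -1;

        switch (f) {
        case FMT_S8:
                width = 8;
                break;          /* without this, S8 falls through and returns 16 */
        case FMT_S16_LE:
                width = 16;
                break;
        default:
                return -1;
        }
        return width;
}

int main(void)
{
        printf("S8 -> %d bits\n", pcm_width(FMT_S8));
        printf("S16_LE -> %d bits\n", pcm_width(FMT_S16_LE));
        return 0;
}
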
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index b82bbf584146..34d0201d6a78 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -584,7 +584,7 @@ static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
                            struct snd_kcontrol *kcontrol, int event)
 {
         struct snd_soc_codec *codec = w->codec;
-        struct arizona *arizona = dev_get_drvdata(codec->dev);
+        struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
         struct regmap *regmap = codec->control_data;
         const struct reg_default *patch = NULL;
         int i, patch_size;
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 134e41c870b9..f8a31ad0b203 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -1083,6 +1083,8 @@ static const struct snd_soc_dapm_route wm8903_intercon[] = {
1083 { "ROP", NULL, "Right Speaker PGA" }, 1083 { "ROP", NULL, "Right Speaker PGA" },
1084 { "RON", NULL, "Right Speaker PGA" }, 1084 { "RON", NULL, "Right Speaker PGA" },
1085 1085
1086 { "Charge Pump", NULL, "CLK_DSP" },
1087
1086 { "Left Headphone Output PGA", NULL, "Charge Pump" }, 1088 { "Left Headphone Output PGA", NULL, "Charge Pump" },
1087 { "Right Headphone Output PGA", NULL, "Charge Pump" }, 1089 { "Right Headphone Output PGA", NULL, "Charge Pump" },
1088 { "Left Line Output PGA", NULL, "Charge Pump" }, 1090 { "Left Line Output PGA", NULL, "Charge Pump" },
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index f3f7e75f8628..9af1bddc4c62 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -828,7 +828,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
                                              &buf_list);
                 if (!buf) {
                         adsp_err(dsp, "Out of memory\n");
-                        return -ENOMEM;
+                        ret = -ENOMEM;
+                        goto out_fw;
                 }
 
                 adsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
@@ -865,7 +866,7 @@ out_fw:
         wm_adsp_buf_free(&buf_list);
 out:
         kfree(file);
-        return 0;
+        return ret;
 }
 
 int wm_adsp1_init(struct wm_adsp *adsp)
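
The wm_adsp hunks restore the single-exit, goto-based unwind pattern: a failure sets ret and jumps to a label so buffers are always freed and the real status is returned instead of an unconditional 0. A minimal userspace sketch of that pattern; load_block(), the block counts and buffer sizes are invented for illustration and do not model the real firmware format.

#include <stdio.h>
#include <stdlib.h>

static int load_block(unsigned char *dst, size_t len)
{
        /* pretend to parse one coefficient block; always succeeds here */
        for (size_t i = 0; i < len; i++)
                dst[i] = (unsigned char)i;
        return 0;
}

static int load_coefficients(size_t nblocks, size_t blksz)
{
        int ret = 0;
        unsigned char *file = malloc(blksz);    /* stand-in for the loaded image */
        unsigned char *buf = NULL;

        if (!file)
                return -1;

        for (size_t i = 0; i < nblocks; i++) {
                buf = malloc(blksz);
                if (!buf) {
                        ret = -1;       /* record the error ...            */
                        goto out_fw;    /* ... and unwind, don't bail out  */
                }
                ret = load_block(buf, blksz);
                free(buf);
                buf = NULL;
                if (ret)
                        goto out_fw;
        }

out_fw:
        free(buf);      /* free(NULL) is a no-op */
        free(file);
        return ret;     /* propagate the real status, not 0 */
}

int main(void)
{
        printf("load_coefficients: %d\n", load_coefficients(4, 64));
        return 0;
}
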
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index dce05b613bd3..7c84eb123d76 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -496,6 +496,8 @@ static void imx_ssi_ac97_reset(struct snd_ac97 *ac97)
 
         if (imx_ssi->ac97_reset)
                 imx_ssi->ac97_reset(ac97);
+        /* First read sometimes fails, do a dummy read */
+        imx_ssi_ac97_read(ac97, 0);
 }
 
 static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)
@@ -504,6 +506,9 @@ static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)
 
         if (imx_ssi->ac97_warm_reset)
                 imx_ssi->ac97_warm_reset(ac97);
+
+        /* First read sometimes fails, do a dummy read */
+        imx_ssi_ac97_read(ac97, 0);
 }
 
 struct snd_ac97_bus_ops soc_ac97_ops = {
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index 8e52c1485df3..eb4373840bb6 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -51,7 +51,7 @@ static struct snd_soc_card pcm030_card = {
         .num_links = ARRAY_SIZE(pcm030_fabric_dai),
 };
 
-static int __init pcm030_fabric_probe(struct platform_device *op)
+static int pcm030_fabric_probe(struct platform_device *op)
 {
         struct device_node *np = op->dev.of_node;
         struct device_node *platform_np;
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index d7231e336a7c..6bbeb0bf1a73 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -972,6 +972,7 @@ static const struct snd_soc_dai_ops samsung_i2s_dai_ops = {
 static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
 {
         struct i2s_dai *i2s;
+        int ret;
 
         i2s = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dai), GFP_KERNEL);
         if (i2s == NULL)
@@ -996,15 +997,17 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
                 i2s->i2s_dai_drv.capture.channels_max = 2;
                 i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES;
                 i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS;
+                dev_set_drvdata(&i2s->pdev->dev, i2s);
         } else {        /* Create a new platform_device for Secondary */
-                i2s->pdev = platform_device_register_resndata(NULL,
-                                "samsung-i2s-sec", -1, NULL, 0, NULL, 0);
+                i2s->pdev = platform_device_alloc("samsung-i2s-sec", -1);
                 if (IS_ERR(i2s->pdev))
                         return NULL;
-        }
 
-        /* Pre-assign snd_soc_dai_set_drvdata */
-        dev_set_drvdata(&i2s->pdev->dev, i2s);
+                platform_set_drvdata(i2s->pdev, i2s);
+                ret = platform_device_add(i2s->pdev);
+                if (ret < 0)
+                        return NULL;
+        }
 
         return i2s;
 }
@@ -1107,6 +1110,10 @@ static int samsung_i2s_probe(struct platform_device *pdev)
 
         if (samsung_dai_type == TYPE_SEC) {
                 sec_dai = dev_get_drvdata(&pdev->dev);
+                if (!sec_dai) {
+                        dev_err(&pdev->dev, "Unable to get drvdata\n");
+                        return -EFAULT;
+                }
                 snd_soc_register_dai(&sec_dai->pdev->dev,
                                 &sec_dai->i2s_dai_drv);
                 asoc_dma_platform_register(&pdev->dev);
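
The samsung-i2s hunks enforce an ordering: the parent attaches its private data to the secondary device before that device is added, because adding it may run its probe immediately and the probe reads the data back (the new NULL check catches the case where it was never set). A toy sketch of that ordering; the "driver core" below is invented purely to show why the drvdata must be set first and is not the Linux device model.

#include <stdio.h>

struct fake_device {
        const char *name;
        void *drvdata;
};

/* "adding" a device runs its probe right away, like a bound driver would */
static int fake_device_add(struct fake_device *dev,
                           int (*probe)(struct fake_device *))
{
        return probe(dev);
}

static int secondary_probe(struct fake_device *dev)
{
        if (!dev->drvdata) {
                fprintf(stderr, "%s: no drvdata, probe fails\n", dev->name);
                return -1;
        }
        printf("%s: found primary DAI data %p\n", dev->name, dev->drvdata);
        return 0;
}

int main(void)
{
        int primary_dai = 42;
        struct fake_device sec = { "samsung-i2s-sec", NULL };

        sec.drvdata = &primary_dai;                     /* set drvdata first ...   */
        int ret = fake_device_add(&sec, secondary_probe);  /* ... then add/probe   */
        return ret ? 1 : 0;
}
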
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index 19eff8fc4fdd..1a8b03e4b41b 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -342,8 +342,8 @@ static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd)
         return 0;
 }
 
-static struct snd_soc_platform sh7760_soc_platform = {
-        .pcm_ops = &camelot_pcm_ops,
+static struct snd_soc_platform_driver sh7760_soc_platform = {
+        .ops = &camelot_pcm_ops,
         .pcm_new = camelot_pcm_new,
         .pcm_free = camelot_pcm_free,
 };
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index b5b3db71e253..ed0bfb0ddb96 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -211,19 +211,27 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
         if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
                 ret = platform->driver->compr_ops->set_params(cstream, params);
                 if (ret < 0)
-                        goto out;
+                        goto err;
         }
 
         if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->set_params) {
                 ret = rtd->dai_link->compr_ops->set_params(cstream);
                 if (ret < 0)
-                        goto out;
+                        goto err;
         }
 
         snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
                                 SND_SOC_DAPM_STREAM_START);
 
-out:
+        /* cancel any delayed stream shutdown that is pending */
+        rtd->pop_wait = 0;
+        mutex_unlock(&rtd->pcm_mutex);
+
+        cancel_delayed_work_sync(&rtd->delayed_work);
+
+        return ret;
+
+err:
         mutex_unlock(&rtd->pcm_mutex);
         return ret;
 }
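
The soc-compress hunk drops the PCM mutex before synchronously cancelling the delayed shutdown work, because that work needs the same lock; cancelling it while still holding the lock could deadlock. A rough pthread sketch of only that lock-ordering point, with the "delayed work" modelled as a plain thread and all names invented; it is not the ASoC workqueue code.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t pcm_lock = PTHREAD_MUTEX_INITIALIZER;
static int pop_wait = 1;

static void *delayed_shutdown(void *arg)
{
        (void)arg;
        sleep(1);                       /* "delayed" part of the work */
        pthread_mutex_lock(&pcm_lock);  /* the work needs the same lock */
        if (pop_wait)
                puts("work: powering down stream");
        else
                puts("work: shutdown was cancelled");
        pthread_mutex_unlock(&pcm_lock);
        return NULL;
}

int main(void)
{
        pthread_t work;

        pthread_create(&work, NULL, delayed_shutdown, NULL);

        pthread_mutex_lock(&pcm_lock);
        pop_wait = 0;                       /* tell the work not to shut down   */
        pthread_mutex_unlock(&pcm_lock);    /* drop the lock first ...          */

        /* ... then wait for the work synchronously; joining while still
         * holding pcm_lock would deadlock, since the work blocks on it */
        pthread_join(work, NULL);
        return 0;
}
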
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 0e64af1a7df9..78468c64dd86 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2963,7 +2963,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
         val = val << shift;
 
         ret = snd_soc_update_bits_locked(codec, reg, val_mask, val);
-        if (ret != 0)
+        if (ret < 0)
                 return ret;
 
         if (snd_soc_volsw_is_stereo(mc)) {
@@ -3140,7 +3140,7 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol,
         if (params->mask) {
                 ret = regmap_read(codec->control_data, params->base, &val);
                 if (ret != 0)
-                        return ret;
+                        goto out;
 
                 val &= params->mask;
 
@@ -3158,13 +3158,15 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol,
                         ((u32 *)data)[0] |= cpu_to_be32(val);
                         break;
                 default:
-                        return -EINVAL;
+                        ret = -EINVAL;
+                        goto out;
                 }
         }
 
         ret = regmap_raw_write(codec->control_data, params->base,
                                data, len);
 
+out:
         kfree(data);
 
         return ret;
@@ -4237,7 +4239,6 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
                         dev_err(card->dev,
                                 "ASoC: Property '%s' index %d could not be read: %d\n",
                                 propname, 2 * i, ret);
-                        kfree(routes);
                         return -EINVAL;
                 }
                 ret = of_property_read_string_index(np, propname,
@@ -4246,7 +4247,6 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
                         dev_err(card->dev,
                                 "ASoC: Property '%s' index %d could not be read: %d\n",
                                 propname, (2 * i) + 1, ret);
-                        kfree(routes);
                         return -EINVAL;
                 }
         }
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 625d4824abbb..33acd8b892dc 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -831,6 +831,9 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
                 if (path->weak)
                         continue;
 
+                if (path->walking)
+                        return 1;
+
                 if (path->walked)
                         continue;
 
@@ -838,6 +841,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
 
                 if (path->sink && path->connect) {
                         path->walked = 1;
+                        path->walking = 1;
 
                         /* do we need to add this widget to the list ? */
                         if (list) {
@@ -847,11 +851,14 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
                                         dev_err(widget->dapm->dev,
                                                 "ASoC: could not add widget %s\n",
                                                 widget->name);
+                                        path->walking = 0;
                                         return con;
                                 }
                         }
 
                         con += is_connected_output_ep(path->sink, list);
+
+                        path->walking = 0;
                 }
         }
 
@@ -931,6 +938,9 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
                 if (path->weak)
                         continue;
 
+                if (path->walking)
+                        return 1;
+
                 if (path->walked)
                         continue;
 
@@ -938,6 +948,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
 
                 if (path->source && path->connect) {
                         path->walked = 1;
+                        path->walking = 1;
 
                         /* do we need to add this widget to the list ? */
                         if (list) {
@@ -947,11 +958,14 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
                                         dev_err(widget->dapm->dev,
                                                 "ASoC: could not add widget %s\n",
                                                 widget->name);
+                                        path->walking = 0;
                                         return con;
                                 }
                         }
 
                         con += is_connected_input_ep(path->source, list);
+
+                        path->walking = 0;
 }
         }
 
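
The soc-dapm hunks add a per-path walking flag that is set while the recursion is inside an edge, so a loop in the routing graph is detected and treated as connected instead of being walked again. Below is a minimal userspace sketch of that guard only; DAPM's walked/list bookkeeping is omitted, and the graph, names and structures are invented for illustration.

#include <stdio.h>

#define MAX_EDGES 4

struct node;

struct path {
        struct node *sink;
        int connect;
        int walking;    /* set while this edge is on the current walk */
};

struct node {
        const char *name;
        int is_endpoint;
        struct path out[MAX_EDGES];
        int nout;
};

/* count endpoints reachable from w; a loop simply counts as connected */
static int connected_output_eps(struct node *w)
{
        int con = 0;

        if (w->is_endpoint)
                return 1;

        for (int i = 0; i < w->nout; i++) {
                struct path *p = &w->out[i];

                if (!p->connect)
                        continue;
                if (p->walking)         /* edge already on the recursion stack */
                        return 1;

                p->walking = 1;
                con += connected_output_eps(p->sink);
                p->walking = 0;
        }
        return con;
}

int main(void)
{
        struct node spk   = { "SPK",   1, { { 0 } }, 0 };
        struct node mixer = { "Mixer", 0, { { 0 } }, 0 };
        struct node pga   = { "PGA",   0, { { 0 } }, 0 };

        /* PGA -> Mixer -> SPK, plus a Mixer -> PGA edge forming a loop */
        pga.out[0] = (struct path){ &mixer, 1, 0 };
        pga.nout = 1;
        mixer.out[0] = (struct path){ &spk, 1, 0 };
        mixer.out[1] = (struct path){ &pga, 1, 0 };
        mixer.nout = 2;

        printf("endpoints reachable from PGA: %d\n",
               connected_output_eps(&pga));
        return 0;
}
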
diff --git a/sound/soc/spear/spear_pcm.c b/sound/soc/spear/spear_pcm.c
index ba66a3f370a3..d653763f83b7 100644
--- a/sound/soc/spear/spear_pcm.c
+++ b/sound/soc/spear/spear_pcm.c
@@ -136,9 +136,9 @@ static void spear_pcm_free(struct snd_pcm *pcm)
 
 static u64 spear_pcm_dmamask = DMA_BIT_MASK(32);
 
-static int spear_pcm_new(struct snd_card *card,
-                struct snd_soc_dai *dai, struct snd_pcm *pcm)
+static int spear_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
+        struct snd_card *card = rtd->card->snd_card;
         int ret;
 
         if (!card->dev->dma_mask)
@@ -146,16 +146,16 @@ static int spear_pcm_new(struct snd_card *card,
         if (!card->dev->coherent_dma_mask)
                 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
 
-        if (dai->driver->playback.channels_min) {
-                ret = spear_pcm_preallocate_dma_buffer(pcm,
+        if (rtd->cpu_dai->driver->playback.channels_min) {
+                ret = spear_pcm_preallocate_dma_buffer(rtd->pcm,
                                 SNDRV_PCM_STREAM_PLAYBACK,
                                 spear_pcm_hardware.buffer_bytes_max);
                 if (ret)
                         return ret;
         }
 
-        if (dai->driver->capture.channels_min) {
-                ret = spear_pcm_preallocate_dma_buffer(pcm,
+        if (rtd->cpu_dai->driver->capture.channels_min) {
+                ret = spear_pcm_preallocate_dma_buffer(rtd->pcm,
                                 SNDRV_PCM_STREAM_CAPTURE,
                                 spear_pcm_hardware.buffer_bytes_max);
                 if (ret)
diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c
index 32d08119dd87..f9f247c64c6d 100644
--- a/sound/soc/tegra/tegra_pcm.c
+++ b/sound/soc/tegra/tegra_pcm.c
@@ -43,8 +43,6 @@
 static const struct snd_pcm_hardware tegra_pcm_hardware = {
         .info = SNDRV_PCM_INFO_MMAP |
                 SNDRV_PCM_INFO_MMAP_VALID |
-                SNDRV_PCM_INFO_PAUSE |
-                SNDRV_PCM_INFO_RESUME |
                 SNDRV_PCM_INFO_INTERLEAVED,
         .formats = SNDRV_PCM_FMTBIT_S16_LE,
         .channels_min = 2,
@@ -111,26 +109,6 @@ static int tegra_pcm_hw_free(struct snd_pcm_substream *substream)
         return 0;
 }
 
-static int tegra_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-        switch (cmd) {
-        case SNDRV_PCM_TRIGGER_START:
-        case SNDRV_PCM_TRIGGER_RESUME:
-        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-                return snd_dmaengine_pcm_trigger(substream,
-                                        SNDRV_PCM_TRIGGER_START);
-
-        case SNDRV_PCM_TRIGGER_STOP:
-        case SNDRV_PCM_TRIGGER_SUSPEND:
-        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-                return snd_dmaengine_pcm_trigger(substream,
-                                        SNDRV_PCM_TRIGGER_STOP);
-        default:
-                return -EINVAL;
-        }
-        return 0;
-}
-
 static int tegra_pcm_mmap(struct snd_pcm_substream *substream,
                           struct vm_area_struct *vma)
 {
@@ -148,7 +126,7 @@ static struct snd_pcm_ops tegra_pcm_ops = {
         .ioctl = snd_pcm_lib_ioctl,
         .hw_params = tegra_pcm_hw_params,
         .hw_free = tegra_pcm_hw_free,
-        .trigger = tegra_pcm_trigger,
+        .trigger = snd_dmaengine_pcm_trigger,
         .pointer = snd_dmaengine_pcm_pointer,
         .mmap = tegra_pcm_mmap,
 };
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 5e634a2eb282..9e2703a25156 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -253,7 +253,7 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface,
 {
         struct usb_device *dev = chip->dev;
         unsigned char data[4];
-        int err, crate;
+        int err, cur_rate, prev_rate;
         int clock = snd_usb_clock_find_source(chip, fmt->clock);
 
         if (clock < 0)
@@ -266,6 +266,19 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface,
                 return -ENXIO;
         }
 
+        err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
+                              USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+                              UAC2_CS_CONTROL_SAM_FREQ << 8,
+                              snd_usb_ctrl_intf(chip) | (clock << 8),
+                              data, sizeof(data));
+        if (err < 0) {
+                snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n",
+                           dev->devnum, iface, fmt->altsetting);
+                prev_rate = 0;
+        } else {
+                prev_rate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
+        }
+
         data[0] = rate;
         data[1] = rate >> 8;
         data[2] = rate >> 16;
@@ -280,19 +293,31 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface,
                 return err;
         }
 
-        if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
-                           USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
-                           UAC2_CS_CONTROL_SAM_FREQ << 8,
-                           snd_usb_ctrl_intf(chip) | (clock << 8),
-                           data, sizeof(data))) < 0) {
+        err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
+                              USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+                              UAC2_CS_CONTROL_SAM_FREQ << 8,
+                              snd_usb_ctrl_intf(chip) | (clock << 8),
+                              data, sizeof(data));
+        if (err < 0) {
                 snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n",
                            dev->devnum, iface, fmt->altsetting);
-                return err;
+                cur_rate = 0;
+        } else {
+                cur_rate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
         }
 
-        crate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
-        if (crate != rate)
-                snd_printd(KERN_WARNING "current rate %d is different from the runtime rate %d\n", crate, rate);
+        if (cur_rate != rate) {
+                snd_printd(KERN_WARNING
+                           "current rate %d is different from the runtime rate %d\n",
+                           cur_rate, rate);
+        }
+
+        /* Some devices doesn't respond to sample rate changes while the
+         * interface is active. */
+        if (rate != prev_rate) {
+                usb_set_interface(dev, iface, 0);
+                usb_set_interface(dev, iface, fmt->altsetting);
+        }
 
         return 0;
 }
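
The clock.c hunks read the device's current rate before and after programming it: the sample rate travels as 4 little-endian bytes, and the interface is only re-selected when the rate really changed, since some devices ignore the request while the interface is active. A small sketch of just the byte packing and the "only bounce when changed" decision, in plain userspace C with no USB calls; the values are invented.

#include <stdint.h>
#include <stdio.h>

static void put_le32(unsigned char buf[4], uint32_t rate)
{
        buf[0] = rate & 0xff;
        buf[1] = (rate >> 8) & 0xff;
        buf[2] = (rate >> 16) & 0xff;
        buf[3] = (rate >> 24) & 0xff;
}

static uint32_t get_le32(const unsigned char buf[4])
{
        return buf[0] | (buf[1] << 8) | (buf[2] << 16) | ((uint32_t)buf[3] << 24);
}

int main(void)
{
        unsigned char data[4];
        uint32_t rate = 96000, prev_rate = 44100;

        put_le32(data, rate);                   /* what the driver sends */
        printf("requested rate: %u\n", (unsigned)get_le32(data));

        /* mirror of the driver's decision: only re-select the altsetting
         * when the rate actually changed */
        if (get_le32(data) != prev_rate)
                puts("rate changed: would bounce the interface here");
        return 0;
}
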
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 497d2741d119..ebe91440a068 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -509,7 +509,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
         else
                 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
                                   USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
-                                  0, cpu_to_le16(wIndex),
+                                  0, wIndex,
                                   &tmp, sizeof(tmp), 1000);
         up_read(&mixer->chip->shutdown_rwsem);
 
@@ -540,7 +540,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
         else
                 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
                                   USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
-                                  cpu_to_le16(wValue), cpu_to_le16(wIndex),
+                                  wValue, wIndex,
                                   NULL, 0, 1000);
         up_read(&mixer->chip->shutdown_rwsem);
 
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 5325a3869bb7..9c5ab22358b1 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -486,7 +486,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
 {
         int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
                                   0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-                                  cpu_to_le16(1), 0, NULL, 0, 1000);
+                                  1, 0, NULL, 0, 1000);
 
         if (ret < 0)
                 return ret;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index adc68feb5c5a..f18013f09e68 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1541,21 +1541,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 }
 
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-                              gpa_t gpa)
+                              gpa_t gpa, unsigned long len)
 {
         struct kvm_memslots *slots = kvm_memslots(kvm);
         int offset = offset_in_page(gpa);
-        gfn_t gfn = gpa >> PAGE_SHIFT;
+        gfn_t start_gfn = gpa >> PAGE_SHIFT;
+        gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
+        gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
+        gfn_t nr_pages_avail;
 
         ghc->gpa = gpa;
         ghc->generation = slots->generation;
-        ghc->memslot = gfn_to_memslot(kvm, gfn);
-        ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
-        if (!kvm_is_error_hva(ghc->hva))
+        ghc->len = len;
+        ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+        ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+        if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
                 ghc->hva += offset;
-        else
-                return -EFAULT;
-
+        } else {
+                /*
+                 * If the requested region crosses two memslots, we still
+                 * verify that the entire region is valid here.
+                 */
+                while (start_gfn <= end_gfn) {
+                        ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+                        ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
+                                                   &nr_pages_avail);
+                        if (kvm_is_error_hva(ghc->hva))
+                                return -EFAULT;
+                        start_gfn += nr_pages_avail;
+                }
+                /* Use the slow path for cross page reads and writes. */
+                ghc->memslot = NULL;
+        }
         return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
@@ -1566,8 +1583,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
         struct kvm_memslots *slots = kvm_memslots(kvm);
         int r;
 
+        BUG_ON(len > ghc->len);
+
         if (slots->generation != ghc->generation)
-                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+        if (unlikely(!ghc->memslot))
+                return kvm_write_guest(kvm, ghc->gpa, data, len);
 
         if (kvm_is_error_hva(ghc->hva))
                 return -EFAULT;
@@ -1587,8 +1609,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
         struct kvm_memslots *slots = kvm_memslots(kvm);
         int r;
 
+        BUG_ON(len > ghc->len);
+
         if (slots->generation != ghc->generation)
-                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+        if (unlikely(!ghc->memslot))
+                return kvm_read_guest(kvm, ghc->gpa, data, len);
 
         if (kvm_is_error_hva(ghc->hva))
                 return -EFAULT;
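
The kvm_main.c hunks make the cached translation length-aware: from an offset and a length they compute how many pages the range touches, take the fast path only when it fits the available mapping, and otherwise validate the range chunk by chunk and fall back to the slow path. A minimal userspace sketch of just that range arithmetic; the page-validity table and helper names below are invented and are not the KVM memslot API.

#include <stdio.h>

#define PAGE_SHIFT 12

/* pretend address space: pages 0..15 are valid, everything else faults */
static int page_valid(unsigned long pfn)
{
        return pfn < 16;
}

static int cache_range_ok(unsigned long gpa, unsigned long len)
{
        unsigned long start = gpa >> PAGE_SHIFT;
        unsigned long end = (gpa + len - 1) >> PAGE_SHIFT;

        if (start == end)
                return page_valid(start);       /* fast path: a single page */

        /* range crosses pages: still verify every page it touches */
        for (unsigned long pfn = start; pfn <= end; pfn++)
                if (!page_valid(pfn))
                        return 0;
        return 1;       /* caller would mark this range for the slow path */
}

int main(void)
{
        printf("%d\n", cache_range_ok(0x3ff0, 0x20));   /* crosses pages 3 and 4: ok */
        printf("%d\n", cache_range_ok(0xfff0, 0x20));   /* crosses into page 16: bad */
        return 0;
}
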