commit    64f8de4da7d3962632f152d3d702d68bb8accc29
tree      c90a872a6d91c824635d59572e1e578980f4bc98
parent    f1fb3449efd5c49b48e35746bc7283eb9c73e3a0
parent    b5c872ddb7083c7909fb76a170c3807e04564bb3
author    Jens Axboe <axboe@kernel.dk>  2013-04-02 04:04:39 -0400
committer Jens Axboe <axboe@kernel.dk>  2013-04-02 04:04:39 -0400
Merge branch 'writeback-workqueue' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq into for-3.10/core
Tejun writes:

-----

This is the pull request for the earlier patchset[1] with the same
name. It's only three patches (the first one was committed to the
workqueue tree) but the merge strategy is a bit involved due to the
dependencies.

* Because the conversion needs features from wq/for-3.10,
  block/for-3.10/core is based on rc3, and wq/for-3.10 has conflicts
  with rc3, I pulled mainline (rc5) into wq/for-3.10 to prevent those
  workqueue conflicts from flaring up in the block tree.

* Resolving the issue that Jan and Dave raised about debugging
  requires arch-wide changes. The patchset is being worked on[2] but
  it'll have to go through -mm after these changes show up in -next,
  and is not included in this pull request.

The three commits are located in the following git branch:

  git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git writeback-workqueue

Pulling it into block/for-3.10/core produces a conflict in
drivers/md/raid5.c between the following two commits:

  e3620a3ad5 ("MD RAID5: Avoid accessing gendisk or queue structs when not available")
  2f6db2a707 ("raid5: use bio_reset()")

The conflict is trivial - one removes an "if ()" conditional while the
other removes "rbi->bi_next = NULL" right above it. We just need to
remove both. The merged branch is available at

  git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git block-test-merge

so that you can use it for verification. The test merge commit has a
proper merge description.

While these changes are a bit of a pain to route, they make the code
simpler and even show a minute but measurable performance gain[3],
even on a workload which isn't particularly favorable to showing the
benefits of this conversion.

-----

Fixed up the conflict.

Conflicts:
	drivers/md/raid5.c

Signed-off-by: Jens Axboe <axboe@kernel.dk>
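For anyone who wants to reproduce the verification step described above, a minimal sketch using only the branches named in the message (the remote name "tj" and the local branch layout are assumptions):

    # Fetch the writeback-workqueue commits and Tejun's reference merge.
    git remote add tj git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git
    git fetch tj

    # Merge into the block tree; this should produce the trivial
    # conflict in drivers/md/raid5.c described above.
    git checkout for-3.10/core
    git merge tj/writeback-workqueue

    # Resolve by deleting both conflicting fragments (the "if ()"
    # conditional and the "rbi->bi_next = NULL" line), then compare
    # the result against the block-test-merge branch for verification.
    git diff tj/block-test-merge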
-rw-r--r--  CREDITS | 8
-rw-r--r--  Documentation/hwmon/lm75 | 2
-rw-r--r--  Documentation/i2c/busses/i2c-diolan-u2c | 2
-rw-r--r--  Documentation/kernel-parameters.txt | 9
-rw-r--r--  Documentation/networking/ipvs-sysctl.txt | 7
-rw-r--r--  Documentation/sound/alsa/ALSA-Configuration.txt | 2
-rw-r--r--  Documentation/sound/alsa/seq_oss.html | 2
-rw-r--r--  MAINTAINERS | 56
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arc/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/arc/include/asm/elf.h | 3
-rw-r--r--  arch/arc/include/asm/entry.h | 2
-rw-r--r--  arch/arc/include/asm/kgdb.h | 6
-rw-r--r--  arch/arc/include/asm/ptrace.h | 2
-rw-r--r--  arch/arc/include/asm/syscalls.h | 2
-rw-r--r--  arch/arc/include/uapi/asm/ptrace.h | 4
-rw-r--r--  arch/arc/kernel/entry.S | 27
-rw-r--r--  arch/arc/kernel/kgdb.c | 1
-rw-r--r--  arch/arc/kernel/setup.c | 4
-rw-r--r--  arch/arc/kernel/sys.c | 2
-rw-r--r--  arch/arm/Kconfig | 11
-rw-r--r--  arch/arm/Kconfig.debug | 1
-rw-r--r--  arch/arm/boot/dts/at91sam9x5.dtsi | 28
-rw-r--r--  arch/arm/boot/dts/exynos4.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/exynos5440.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/tegra20.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/tegra30.dtsi | 2
-rw-r--r--  arch/arm/kernel/smp.c | 2
-rw-r--r--  arch/arm/lib/memset.S | 33
-rw-r--r--  arch/arm/mach-at91/include/mach/gpio.h | 8
-rw-r--r--  arch/arm/mach-at91/irq.c | 20
-rw-r--r--  arch/arm/mach-at91/pm.c | 10
-rw-r--r--  arch/arm/mach-davinci/dma.c | 3
-rw-r--r--  arch/arm/mach-footbridge/Kconfig | 1
-rw-r--r--  arch/arm/mach-imx/clk-imx35.c | 1
-rw-r--r--  arch/arm/mach-imx/imx25-dt.c | 5
-rw-r--r--  arch/arm/mach-mmp/gplugd.c | 1
-rw-r--r--  arch/arm/mach-mxs/mach-mxs.c | 24
-rw-r--r--  arch/arm/mach-s5pv210/clock.c | 36
-rw-r--r--  arch/arm/mach-s5pv210/mach-goni.c | 2
-rw-r--r--  arch/arm/mach-shmobile/board-marzen.c | 1
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 2
-rw-r--r--  arch/arm64/Kconfig | 1
-rw-r--r--  arch/arm64/Kconfig.debug | 11
-rw-r--r--  arch/arm64/configs/defconfig | 1
-rw-r--r--  arch/arm64/include/asm/ucontext.h | 2
-rw-r--r--  arch/arm64/kernel/arm64ksyms.c | 2
-rw-r--r--  arch/arm64/kernel/signal32.c | 1
-rw-r--r--  arch/arm64/mm/mmu.c | 2
-rw-r--r--  arch/ia64/kernel/process.c | 5
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 128
-rw-r--r--  arch/powerpc/kernel/cputable.c | 2
-rw-r--r--  arch/powerpc/kernel/epapr_paravirt.c | 6
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 178
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 14
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 4
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 22
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c | 11
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 2
-rw-r--r--  arch/powerpc/mm/slb_low.S | 50
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c | 2
-rw-r--r--  arch/powerpc/perf/power7-pmu.c | 13
-rw-r--r--  arch/powerpc/platforms/85xx/sgy_cts1000.c | 6
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 6
-rw-r--r--  arch/s390/include/asm/eadm.h | 6
-rw-r--r--  arch/s390/include/asm/tlbflush.h | 2
-rw-r--r--  arch/s390/kernel/entry.S | 3
-rw-r--r--  arch/s390/kernel/entry64.S | 5
-rw-r--r--  arch/s390/kernel/setup.c | 2
-rw-r--r--  arch/sparc/Kconfig | 8
-rw-r--r--  arch/sparc/include/asm/spitfire.h | 1
-rw-r--r--  arch/sparc/kernel/cpu.c | 6
-rw-r--r--  arch/sparc/kernel/head_64.S | 25
-rw-r--r--  arch/sparc/kernel/leon_pci_grpci2.c | 41
-rw-r--r--  arch/tile/configs/tilegx_defconfig | 1
-rw-r--r--  arch/tile/configs/tilepro_defconfig | 1
-rw-r--r--  arch/x86/include/asm/kprobes.h | 1
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 4
-rw-r--r--  arch/x86/include/asm/xen/hypercall.h | 4
-rw-r--r--  arch/x86/include/uapi/asm/msr-index.h | 1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 4
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 5
-rw-r--r--  arch/x86/kernel/microcode_intel_early.c | 30
-rw-r--r--  arch/x86/kvm/x86.c | 64
-rw-r--r--  arch/x86/lib/usercopy_64.c | 4
-rw-r--r--  arch/x86/xen/mmu.c | 3
-rw-r--r--  block/blk-flush.c | 2
-rw-r--r--  block/partition-generic.c | 1
-rw-r--r--  drivers/acpi/apei/cper.c | 2
-rw-r--r--  drivers/acpi/pci_root.c | 9
-rw-r--r--  drivers/acpi/sleep.c | 8
-rw-r--r--  drivers/amba/tegra-ahb.c | 2
-rw-r--r--  drivers/ata/Kconfig | 13
-rw-r--r--  drivers/ata/ahci.c | 2
-rw-r--r--  drivers/ata/ata_piix.c | 4
-rw-r--r--  drivers/ata/libata-acpi.c | 2
-rw-r--r--  drivers/ata/pata_samsung_cf.c | 13
-rw-r--r--  drivers/ata/sata_fsl.c | 3
-rw-r--r--  drivers/base/base.h | 2
-rw-r--r--  drivers/base/bus.c | 73
-rw-r--r--  drivers/base/core.c | 2
-rw-r--r--  drivers/block/Kconfig | 4
-rw-r--r--  drivers/block/cciss.c | 2
-rw-r--r--  drivers/block/loop.c | 22
-rw-r--r--  drivers/block/mg_disk.c | 4
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 4
-rw-r--r--  drivers/block/nvme.c | 33
-rw-r--r--  drivers/block/rbd.c | 47
-rw-r--r--  drivers/block/rsxx/Makefile | 2
-rw-r--r--  drivers/block/rsxx/config.c | 8
-rw-r--r--  drivers/block/rsxx/core.c | 237
-rw-r--r--  drivers/block/rsxx/cregs.c | 112
-rw-r--r--  drivers/block/rsxx/dma.c | 239
-rw-r--r--  drivers/block/rsxx/rsxx.h | 6
-rw-r--r--  drivers/block/rsxx/rsxx_cfg.h | 2
-rw-r--r--  drivers/block/rsxx/rsxx_priv.h | 34
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 68
-rw-r--r--  drivers/block/xen-blkback/common.h | 40
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 14
-rw-r--r--  drivers/block/xen-blkfront.c | 154
-rw-r--r--  drivers/bluetooth/ath3k.c | 8
-rw-r--r--  drivers/bluetooth/btusb.c | 4
-rw-r--r--  drivers/clk/clk-vt8500.c | 2
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 12
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 34
-rw-r--r--  drivers/crypto/caam/caamalg.c | 27
-rw-r--r--  drivers/crypto/caam/compat.h | 1
-rw-r--r--  drivers/crypto/talitos.c | 30
-rw-r--r--  drivers/dma/dw_dmac.c | 23
-rw-r--r--  drivers/dma/dw_dmac_regs.h | 1
-rw-r--r--  drivers/edac/amd64_edac.c | 15
-rw-r--r--  drivers/edac/edac_mc.c | 6
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 17
-rw-r--r--  drivers/extcon/extcon-max77693.c | 103
-rw-r--r--  drivers/extcon/extcon-max8997.c | 56
-rw-r--r--  drivers/firmware/Kconfig | 18
-rw-r--r--  drivers/firmware/efivars.c | 150
-rw-r--r--  drivers/gpio/gpiolib-of.c | 5
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 21
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 370
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 21
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/object.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/therm.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/base.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/ic.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c | 67
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/priv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/temp.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 1
-rw-r--r--  drivers/hid/hid-ids.h | 6
-rw-r--r--  drivers/hid/hid-multitouch.c | 6
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 2
-rw-r--r--  drivers/hwmon/lm75.h | 2
-rw-r--r--  drivers/i2c/Kconfig | 2
-rw-r--r--  drivers/i2c/busses/Kconfig | 6
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 13
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pca9541.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 12
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/Kconfig | 6
-rw-r--r--  drivers/infiniband/hw/qib/qib_driver.c | 5
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba6120.c | 3
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 8
-rw-r--r--  drivers/infiniband/hw/qib/qib_sd7220.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 8
-rw-r--r--  drivers/input/joystick/analog.c | 8
-rw-r--r--  drivers/iommu/Kconfig | 2
-rw-r--r--  drivers/iommu/amd_iommu.c | 22
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 2
-rw-r--r--  drivers/iommu/irq_remapping.c | 1
-rw-r--r--  drivers/isdn/hisax/Kconfig | 6
-rw-r--r--  drivers/md/dm-bufio.c | 2
-rw-r--r--  drivers/md/dm-cache-metadata.c | 64
-rw-r--r--  drivers/md/dm-cache-metadata.h | 2
-rw-r--r--  drivers/md/dm-cache-policy-cleaner.c | 7
-rw-r--r--  drivers/md/dm-cache-policy-internal.h | 2
-rw-r--r--  drivers/md/dm-cache-policy-mq.c | 8
-rw-r--r--  drivers/md/dm-cache-policy.c | 8
-rw-r--r--  drivers/md/dm-cache-policy.h | 2
-rw-r--r--  drivers/md/dm-cache-target.c | 169
-rw-r--r--  drivers/md/dm-thin.c | 11
-rw-r--r--  drivers/md/dm-verity.c | 39
-rw-r--r--  drivers/md/md.c | 6
-rw-r--r--  drivers/md/md.h | 4
-rw-r--r--  drivers/md/persistent-data/dm-btree-remove.c | 46
-rw-r--r--  drivers/md/raid5.c | 116
-rw-r--r--  drivers/md/raid5.h | 5
-rw-r--r--  drivers/media/i2c/m5mols/m5mols_core.c | 2
-rw-r--r--  drivers/media/pci/bt8xx/bttv-driver.c | 20
-rw-r--r--  drivers/media/platform/exynos-gsc/gsc-core.c | 8
-rw-r--r--  drivers/media/platform/s5p-fimc/fimc-core.c | 6
-rw-r--r--  drivers/media/platform/s5p-fimc/fimc-lite-reg.c | 8
-rw-r--r--  drivers/media/platform/s5p-fimc/fimc-lite.c | 1
-rw-r--r--  drivers/media/platform/s5p-fimc/fimc-mdevice.c | 39
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc.c | 2
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_enc.c | 1
-rw-r--r--  drivers/media/rc/Kconfig | 2
-rw-r--r--  drivers/media/v4l2-core/Makefile | 2
-rw-r--r--  drivers/misc/mei/hw-me.c | 29
-rw-r--r--  drivers/misc/mei/init.c | 18
-rw-r--r--  drivers/misc/mei/mei_dev.h | 1
-rw-r--r--  drivers/misc/mei/pci-me.c | 52
-rw-r--r--  drivers/misc/vmw_vmci/vmci_datagram.c | 4
-rw-r--r--  drivers/mtd/bcm47xxpart.c | 52
-rw-r--r--  drivers/mtd/nand/nand_base.c | 16
-rw-r--r--  drivers/mtd/nand/nand_ids.c | 80
-rw-r--r--  drivers/net/bonding/bond_main.c | 6
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 18
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 12
-rw-r--r--  drivers/net/ethernet/dec/tulip/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec.c | 33
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 33
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 24
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 45
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 3
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 4
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 10
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 3
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 4
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 2
-rw-r--r--  drivers/net/netconsole.c | 15
-rw-r--r--  drivers/net/usb/Kconfig | 2
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 11
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 49
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 49
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_calib.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/link.c | 26
-rw-r--r--  drivers/net/wireless/iwlegacy/3945-mac.c | 22
-rw-r--r--  drivers/net/wireless/mwifiex/cmdevt.c | 22
-rw-r--r--  drivers/net/wireless/mwifiex/init.c | 8
-rw-r--r--  drivers/net/wireless/mwifiex/join.c | 7
-rw-r--r--  drivers/net/wireless/mwifiex/main.h | 4
-rw-r--r--  drivers/net/wireless/mwifiex/scan.c | 8
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 10
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c | 14
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 89
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c | 1
-rw-r--r--  drivers/pci/rom.c | 55
-rw-r--r--  drivers/pinctrl/mvebu/pinctrl-mvebu.c | 2
-rw-r--r--  drivers/pinctrl/pinconf.c | 2
-rw-r--r--  drivers/pinctrl/pinconf.h | 2
-rw-r--r--  drivers/pinctrl/pinctrl-abx500.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-at91.c | 61
-rw-r--r--  drivers/pinctrl/pinmux.c | 5
-rw-r--r--  drivers/rtc/rtc-at91rm9200.c | 50
-rw-r--r--  drivers/rtc/rtc-at91rm9200.h | 1
-rw-r--r--  drivers/rtc/rtc-da9052.c | 8
-rw-r--r--  drivers/s390/block/scm_blk.c | 69
-rw-r--r--  drivers/s390/block/scm_blk.h | 2
-rw-r--r--  drivers/s390/block/scm_drv.c | 23
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 2
-rw-r--r--  drivers/s390/cio/chsc.c | 17
-rw-r--r--  drivers/s390/cio/chsc.h | 2
-rw-r--r--  drivers/s390/cio/scm.c | 18
-rw-r--r--  drivers/s390/net/qeth_core.h | 1
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 45
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 23
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/s626.c | 2
-rw-r--r--  drivers/staging/zcache/Kconfig | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c | 5
-rw-r--r--  drivers/target/target_core_file.h | 2
-rw-r--r--  drivers/target/target_core_pscsi.c | 11
-rw-r--r--  drivers/target/target_core_sbc.c | 7
-rw-r--r--  drivers/target/target_core_tpg.c | 3
-rw-r--r--  drivers/target/target_core_transport.c | 4
-rw-r--r--  drivers/thermal/dove_thermal.c | 16
-rw-r--r--  drivers/thermal/exynos_thermal.c | 2
-rw-r--r--  drivers/thermal/kirkwood_thermal.c | 8
-rw-r--r--  drivers/thermal/rcar_thermal.c | 29
-rw-r--r--  drivers/tty/serial/8250/8250_core.c (renamed from drivers/tty/serial/8250/8250.c) | 6
-rw-r--r--  drivers/tty/serial/8250/8250_pci.c | 13
-rw-r--r--  drivers/tty/serial/8250/Kconfig | 17
-rw-r--r--  drivers/tty/serial/8250/Makefile | 8
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 11
-rw-r--r--  drivers/tty/serial/sunsu.c | 21
-rw-r--r--  drivers/tty/serial/xilinx_uartps.c | 2
-rw-r--r--  drivers/tty/vt/vc_screen.c | 6
-rw-r--r--  drivers/usb/class/cdc-acm.c | 22
-rw-r--r--  drivers/usb/core/hcd-pci.c | 23
-rw-r--r--  drivers/usb/core/hcd.c | 8
-rw-r--r--  drivers/usb/core/usb-acpi.c | 8
-rw-r--r--  drivers/usb/gadget/Kconfig | 1
-rw-r--r--  drivers/usb/gadget/f_rndis.c | 3
-rw-r--r--  drivers/usb/gadget/g_ffs.c | 4
-rw-r--r--  drivers/usb/gadget/net2272.c | 9
-rw-r--r--  drivers/usb/gadget/net2280.c | 8
-rw-r--r--  drivers/usb/gadget/u_serial.c | 2
-rw-r--r--  drivers/usb/gadget/udc-core.c | 2
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 1
-rw-r--r--  drivers/usb/host/ehci-hub.c | 2
-rw-r--r--  drivers/usb/host/ehci-q.c | 13
-rw-r--r--  drivers/usb/host/ehci-sched.c | 2
-rw-r--r--  drivers/usb/host/ehci-timer.c | 2
-rw-r--r--  drivers/usb/host/xhci-mem.c | 36
-rw-r--r--  drivers/usb/host/xhci-pci.c | 1
-rw-r--r--  drivers/usb/host/xhci-ring.c | 61
-rw-r--r--  drivers/usb/host/xhci.c | 25
-rw-r--r--  drivers/usb/host/xhci.h | 9
-rw-r--r--  drivers/usb/musb/da8xx.c | 2
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 9
-rw-r--r--  drivers/usb/phy/Kconfig | 1
-rw-r--r--  drivers/usb/serial/ark3116.c | 10
-rw-r--r--  drivers/usb/serial/ch341.c | 11
-rw-r--r--  drivers/usb/serial/cypress_m8.c | 14
-rw-r--r--  drivers/usb/serial/f81232.c | 9
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 20
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 7
-rw-r--r--  drivers/usb/serial/garmin_gps.c | 7
-rw-r--r--  drivers/usb/serial/io_edgeport.c | 12
-rw-r--r--  drivers/usb/serial/io_ti.c | 13
-rw-r--r--  drivers/usb/serial/mct_u232.c | 13
-rw-r--r--  drivers/usb/serial/mos7840.c | 16
-rw-r--r--  drivers/usb/serial/oti6858.c | 10
-rw-r--r--  drivers/usb/serial/pl2303.c | 11
-rw-r--r--  drivers/usb/serial/quatech2.c | 12
-rw-r--r--  drivers/usb/serial/spcp8x5.c | 9
-rw-r--r--  drivers/usb/serial/ssu100.c | 12
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 10
-rw-r--r--  drivers/usb/serial/usb-serial.c | 4
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 7
-rw-r--r--  drivers/vfio/pci/vfio_pci_config.c | 1
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c | 1
-rw-r--r--  drivers/vhost/net.c | 3
-rw-r--r--  drivers/vhost/tcm_vhost.c | 26
-rw-r--r--  drivers/video/atmel_lcdfb.c | 22
-rw-r--r--  drivers/video/ep93xx-fb.c | 1
-rw-r--r--  drivers/video/mxsfb.c | 7
-rw-r--r--  drivers/video/omap/omapfb_main.c | 2
-rw-r--r--  drivers/video/omap2/displays/panel-tpo-td043mtea1.c | 13
-rw-r--r--  drivers/video/omap2/dss/dss_features.c | 6
-rw-r--r--  drivers/watchdog/sp5100_tco.c | 126
-rw-r--r--  drivers/watchdog/sp5100_tco.h | 2
-rw-r--r--  drivers/xen/Kconfig | 2
-rw-r--r--  drivers/xen/events.c | 20
-rw-r--r--  drivers/xen/fallback.c | 3
-rw-r--r--  drivers/xen/xen-acpi-processor.c | 3
-rw-r--r--  drivers/xen/xen-pciback/pci_stub.c | 59
-rw-r--r--  firmware/Makefile | 2
-rw-r--r--  firmware/intel/sd7220.fw.ihex (renamed from firmware/qlogic/sd7220.fw.ihex) | 0
-rw-r--r--  fs/btrfs/ctree.c | 30
-rw-r--r--  fs/btrfs/disk-io.c | 14
-rw-r--r--  fs/btrfs/extent-tree.c | 84
-rw-r--r--  fs/btrfs/extent_io.c | 33
-rw-r--r--  fs/btrfs/extent_io.h | 2
-rw-r--r--  fs/btrfs/file-item.c | 6
-rw-r--r--  fs/btrfs/file.c | 9
-rw-r--r--  fs/btrfs/inode.c | 22
-rw-r--r--  fs/btrfs/ordered-data.c | 2
-rw-r--r--  fs/btrfs/qgroup.c | 3
-rw-r--r--  fs/btrfs/scrub.c | 3
-rw-r--r--  fs/btrfs/send.c | 10
-rw-r--r--  fs/btrfs/volumes.c | 13
-rw-r--r--  fs/cifs/asn1.c | 53
-rw-r--r--  fs/cifs/cifsfs.c | 24
-rw-r--r--  fs/cifs/cifsfs.h | 4
-rw-r--r--  fs/cifs/file.c | 6
-rw-r--r--  fs/cifs/inode.c | 10
-rw-r--r--  fs/cifs/netmisc.c | 2
-rw-r--r--  fs/dcache.c | 16
-rw-r--r--  fs/ext4/ext4.h | 8
-rw-r--r--  fs/ext4/extents.c | 105
-rw-r--r--  fs/ext4/extents_status.c | 212
-rw-r--r--  fs/ext4/extents_status.h | 9
-rw-r--r--  fs/ext4/ialloc.c | 4
-rw-r--r--  fs/ext4/inode.c | 182
-rw-r--r--  fs/ext4/mballoc.c | 23
-rw-r--r--  fs/ext4/move_extent.c | 43
-rw-r--r--  fs/ext4/page-io.c | 12
-rw-r--r--  fs/ext4/resize.c | 4
-rw-r--r--  fs/ext4/super.c | 4
-rw-r--r--  fs/fs-writeback.c | 102
-rw-r--r--  fs/internal.h | 5
-rw-r--r--  fs/jbd2/transaction.c | 15
-rw-r--r--  fs/namespace.c | 54
-rw-r--r--  fs/nfs/blocklayout/blocklayoutdm.c | 4
-rw-r--r--  fs/nfs/idmap.c | 13
-rw-r--r--  fs/nfs/nfs4filelayout.c | 1
-rw-r--r--  fs/nfs/nfs4proc.c | 16
-rw-r--r--  fs/nfs/pnfs.c | 81
-rw-r--r--  fs/nfs/pnfs.h | 6
-rw-r--r--  fs/nfsd/nfscache.c | 11
-rw-r--r--  fs/nfsd/vfs.c | 3
-rw-r--r--  fs/pnode.c | 6
-rw-r--r--  fs/pnode.h | 1
-rw-r--r--  fs/proc/inode.c | 6
-rw-r--r--  fs/proc/root.c | 4
-rw-r--r--  fs/read_write.c | 28
-rw-r--r--  fs/splice.c | 4
-rw-r--r--  fs/sysfs/dir.c | 17
-rw-r--r--  fs/sysfs/mount.c | 4
-rw-r--r--  fs/xfs/xfs_buf.c | 6
-rw-r--r--  fs/xfs/xfs_iomap.c | 4
-rw-r--r--  include/drm/drm_pciids.h | 13
-rw-r--r--  include/linux/backing-dev.h | 16
-rw-r--r--  include/linux/cpumask.h | 15
-rw-r--r--  include/linux/debug_locks.h | 4
-rw-r--r--  include/linux/device.h | 2
-rw-r--r--  include/linux/edac.h | 7
-rw-r--r--  include/linux/freezer.h | 3
-rw-r--r--  include/linux/fs_struct.h | 2
-rw-r--r--  include/linux/hash.h | 3
-rw-r--r--  include/linux/irq_work.h | 2
-rw-r--r--  include/linux/kernel.h | 1
-rw-r--r--  include/linux/mfd/max77693-private.h | 23
-rw-r--r--  include/linux/mm.h | 1
-rw-r--r--  include/linux/mman.h | 4
-rw-r--r--  include/linux/mmzone.h | 2
-rw-r--r--  include/linux/mount.h | 2
-rw-r--r--  include/linux/mtd/nand.h | 7
-rw-r--r--  include/linux/mxsfb.h | 7
-rw-r--r--  include/linux/nvme.h | 28
-rw-r--r--  include/linux/printk.h | 6
-rw-r--r--  include/linux/sched.h | 2
-rw-r--r--  include/linux/skbuff.h | 13
-rw-r--r--  include/linux/thermal.h | 2
-rw-r--r--  include/linux/udp.h | 1
-rw-r--r--  include/linux/usb/cdc_ncm.h | 1
-rw-r--r--  include/linux/usb/hcd.h | 2
-rw-r--r--  include/linux/usb/serial.h | 2
-rw-r--r--  include/linux/usb/ulpi.h | 8
-rw-r--r--  include/linux/user_namespace.h | 4
-rw-r--r--  include/linux/workqueue.h | 166
-rw-r--r--  include/net/dst.h | 6
-rw-r--r--  include/net/flow_keys.h | 1
-rw-r--r--  include/net/inet_frag.h | 9
-rw-r--r--  include/net/ip_fib.h | 12
-rw-r--r--  include/net/ip_vs.h | 12
-rw-r--r--  include/net/ipip.h | 16
-rw-r--r--  include/trace/events/writeback.h | 5
-rw-r--r--  include/uapi/linux/packet_diag.h | 4
-rw-r--r--  include/uapi/linux/unix_diag.h | 4
-rw-r--r--  include/video/atmel_lcdc.h | 2
-rw-r--r--  include/xen/interface/io/blkif.h | 10
-rw-r--r--  include/xen/interface/physdev.h | 6
-rw-r--r--  ipc/mqueue.c | 15
-rw-r--r--  kernel/cgroup.c | 4
-rw-r--r--  kernel/cpuset.c | 16
-rw-r--r--  kernel/events/core.c | 8
-rw-r--r--  kernel/exit.c | 2
-rw-r--r--  kernel/kthread.c | 2
-rw-r--r--  kernel/lockdep.c | 17
-rw-r--r--  kernel/pid_namespace.c | 3
-rw-r--r--  kernel/printk.c | 80
-rw-r--r--  kernel/sched/core.c | 9
-rw-r--r--  kernel/sys.c | 57
-rw-r--r--  kernel/time/tick-broadcast.c | 3
-rw-r--r--  kernel/trace/ftrace.c | 4
-rw-r--r--  kernel/trace/trace.c | 59
-rw-r--r--  kernel/trace/trace.h | 6
-rw-r--r--  kernel/trace/trace_irqsoff.c | 19
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 18
-rw-r--r--  kernel/user.c | 2
-rw-r--r--  kernel/user_namespace.c | 11
-rw-r--r--  kernel/workqueue.c | 2868
-rw-r--r--  kernel/workqueue_internal.h | 9
-rw-r--r--  lib/bust_spinlocks.c | 3
-rw-r--r--  lib/dma-debug.c | 45
-rw-r--r--  mm/backing-dev.c | 259
-rw-r--r--  mm/fremap.c | 12
-rw-r--r--  mm/hugetlb.c | 8
-rw-r--r--  mm/memory_hotplug.c | 6
-rw-r--r--  mm/mlock.c | 11
-rw-r--r--  mm/mmap.c | 4
-rw-r--r--  net/8021q/vlan.c | 14
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 6
-rw-r--r--  net/bluetooth/sco.c | 1
-rw-r--r--  net/bridge/br_fdb.c | 2
-rw-r--r--  net/bridge/br_netlink.c | 2
-rw-r--r--  net/core/dev.c | 3
-rw-r--r--  net/core/flow_dissector.c | 2
-rw-r--r--  net/core/rtnetlink.c | 2
-rw-r--r--  net/core/scm.c | 4
-rw-r--r--  net/ipv4/af_inet.c | 3
-rw-r--r--  net/ipv4/inet_fragment.c | 20
-rw-r--r--  net/ipv4/ip_fragment.c | 11
-rw-r--r--  net/ipv4/ip_gre.c | 5
-rw-r--r--  net/ipv4/ip_options.c | 5
-rw-r--r--  net/ipv4/ipconfig.c | 3
-rw-r--r--  net/ipv4/netfilter/Kconfig | 13
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 7
-rw-r--r--  net/ipv4/tcp_ipv4.c | 14
-rw-r--r--  net/ipv4/tcp_output.c | 8
-rw-r--r--  net/ipv4/udp.c | 7
-rw-r--r--  net/ipv6/addrconf.c | 26
-rw-r--r--  net/ipv6/netfilter/ip6t_NPT.c | 2
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 12
-rw-r--r--  net/ipv6/reassembly.c | 8
-rw-r--r--  net/ipv6/tcp_ipv6.c | 7
-rw-r--r--  net/ipv6/udp.c | 8
-rw-r--r--  net/irda/af_irda.c | 6
-rw-r--r--  net/l2tp/l2tp_core.c | 206
-rw-r--r--  net/l2tp/l2tp_core.h | 22
-rw-r--r--  net/l2tp/l2tp_debugfs.c | 28
-rw-r--r--  net/l2tp/l2tp_ip.c | 6
-rw-r--r--  net/l2tp/l2tp_ip6.c | 7
-rw-r--r--  net/l2tp/l2tp_netlink.c | 72
-rw-r--r--  net/l2tp/l2tp_ppp.c | 111
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 14
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 7
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c | 16
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c | 12
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c | 12
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 12
-rw-r--r--  net/netfilter/nf_conntrack_proto_udplite.c | 12
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c | 2
-rw-r--r--  net/netlink/genetlink.c | 1
-rw-r--r--  net/nfc/llcp/llcp.c | 62
-rw-r--r--  net/nfc/llcp/sock.c | 2
-rw-r--r--  net/openvswitch/actions.c | 4
-rw-r--r--  net/openvswitch/datapath.c | 3
-rw-r--r--  net/openvswitch/flow.c | 6
-rw-r--r--  net/openvswitch/vport-netdev.c | 3
-rw-r--r--  net/openvswitch/vport.c | 3
-rw-r--r--  net/sctp/associola.c | 2
-rw-r--r--  net/sctp/sm_statefuns.c | 2
-rw-r--r--  net/sunrpc/sched.c | 9
-rw-r--r--  net/unix/af_unix.c | 11
-rw-r--r--  security/selinux/xfrm.c | 2
-rw-r--r--  security/yama/yama_lsm.c | 4
-rw-r--r--  sound/pci/hda/hda_codec.c | 2
-rw-r--r--  sound/pci/hda/hda_generic.c | 46
-rw-r--r--  sound/pci/hda/hda_intel.c | 132
-rw-r--r--  sound/pci/hda/patch_cirrus.c | 4
-rw-r--r--  sound/pci/hda/patch_conexant.c | 16
-rw-r--r--  sound/usb/mixer.c | 21
-rw-r--r--  tools/lib/traceevent/Makefile | 2
-rw-r--r--  tools/perf/Makefile | 8
-rw-r--r--  tools/perf/bench/bench.h | 24
-rw-r--r--  tools/perf/builtin-record.c | 6
-rw-r--r--  tools/perf/util/hist.h | 5
-rw-r--r--  tools/perf/util/strlist.c | 2
572 files changed, 8490 insertions(+), 4347 deletions(-)
diff --git a/CREDITS b/CREDITS
index 78163cb3eb6a..afaa7cec6ea5 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1510,6 +1510,14 @@ D: Natsemi ethernet
 D: Cobalt Networks (x86) support
 D: This-and-That
 
+N: Mark M. Hoffman
+E: mhoffman@lightlink.com
+D: asb100, lm93 and smsc47b397 hardware monitoring drivers
+D: hwmon subsystem core
+D: hwmon subsystem maintainer
+D: i2c-sis96x and i2c-stub SMBus drivers
+S: USA
+
 N: Dirk Hohndel
 E: hohndel@suse.de
 D: The XFree86[tm] Project
diff --git a/Documentation/hwmon/lm75 b/Documentation/hwmon/lm75
index c91a1d15fa28..69af1c7db6b7 100644
--- a/Documentation/hwmon/lm75
+++ b/Documentation/hwmon/lm75
@@ -23,7 +23,7 @@ Supported chips:
     Datasheet: Publicly available at the Maxim website
                http://www.maxim-ic.com/
   * Microchip (TelCom) TCN75
-    Prefix: 'lm75'
+    Prefix: 'tcn75'
     Addresses scanned: none
     Datasheet: Publicly available at the Microchip website
                http://www.microchip.com/
diff --git a/Documentation/i2c/busses/i2c-diolan-u2c b/Documentation/i2c/busses/i2c-diolan-u2c
index 30fe4bb9a069..0d6018c316c7 100644
--- a/Documentation/i2c/busses/i2c-diolan-u2c
+++ b/Documentation/i2c/busses/i2c-diolan-u2c
@@ -5,7 +5,7 @@ Supported adapters:
 Documentation:
 	http://www.diolan.com/i2c/u2c12.html
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 Description
 -----------
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4609e81dbc37..c75ea0b8ec59 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3222,6 +3222,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			or other driver-specific files in the
 			Documentation/watchdog/ directory.
 
+	workqueue.disable_numa
+			By default, all work items queued to unbound
+			workqueues are affine to the NUMA nodes they're
+			issued on, which results in better behavior in
+			general. If NUMA affinity needs to be disabled for
+			whatever reason, this option can be used. Note
+			that this also can be controlled per-workqueue for
+			workqueues visible under /sys/bus/workqueue/.
+
 	x2apic_phys	[X86-64,APIC] Use x2apic physical mode instead of
 			default x2apic cluster mode on platforms
 			supporting x2apic.
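A quick usage sketch for the two knobs the new paragraph above documents; the boot parameter and the /sys/bus/workqueue/ directory are quoted verbatim from the patch, while the workqueue and attribute names in the last line are assumptions:

    # Disable NUMA affinity for all unbound workqueues at boot by
    # appending the parameter to the kernel command line:
    #     ... workqueue.disable_numa ...

    # Inspect per-workqueue controls for workqueues visible in sysfs:
    ls /sys/bus/workqueue/devices/
    cat /sys/bus/workqueue/devices/writeback/numa   # names assumed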
diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt
index f2a2488f1bf3..9573d0c48c6e 100644
--- a/Documentation/networking/ipvs-sysctl.txt
+++ b/Documentation/networking/ipvs-sysctl.txt
@@ -15,6 +15,13 @@ amemthresh - INTEGER
 	enabled and the variable is automatically set to 2, otherwise
 	the strategy is disabled and the variable is set to 1.
 
+backup_only - BOOLEAN
+	0 - disabled (default)
+	not 0 - enabled
+
+	If set, disable the director function while the server is
+	in backup mode to avoid packet loops for DR/TUN methods.
+
 conntrack - BOOLEAN
 	0 - disabled (default)
 	not 0 - enabled
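As a usage sketch, the new toggle would be flipped like the other booleans in this file (the net.ipv4.vs. prefix is an assumption based on the standard IPVS sysctl namespace):

    # On a backup director, stop forwarding to avoid DR/TUN packet loops:
    sysctl -w net.ipv4.vs.backup_only=1

    # Restore the default (director function active even in backup mode):
    sysctl -w net.ipv4.vs.backup_only=0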
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index ce6581c8ca26..4499bd948860 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -912,7 +912,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
 	  models depending on the codec chip. The list of available models
 	  is found in HD-Audio-Models.txt
 
-	  The model name "genric" is treated as a special case. When this
+	  The model name "generic" is treated as a special case. When this
 	  model is given, the driver uses the generic codec parser without
 	  "codec-patch". It's sometimes good for testing and debugging.
 
diff --git a/Documentation/sound/alsa/seq_oss.html b/Documentation/sound/alsa/seq_oss.html
index d9776cf60c07..9663b45f6fde 100644
--- a/Documentation/sound/alsa/seq_oss.html
+++ b/Documentation/sound/alsa/seq_oss.html
@@ -285,7 +285,7 @@ sample data.
 <H4>
 7.2.4 Close Callback</H4>
 The <TT>close</TT> callback is called when this device is closed by the
-applicaion. If any private data was allocated in open callback, it must
+application. If any private data was allocated in open callback, it must
 be released in the close callback. The deletion of ALSA port should be
 done here, too. This callback must not be NULL.
 <H4>
diff --git a/MAINTAINERS b/MAINTAINERS
index 50b4d735f961..74e58a4d035b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1338,12 +1338,6 @@ S: Maintained
 F:	drivers/platform/x86/asus*.c
 F:	drivers/platform/x86/eeepc*.c
 
-ASUS ASB100 HARDWARE MONITOR DRIVER
-M:	"Mark M. Hoffman" <mhoffman@lightlink.com>
-L:	lm-sensors@lm-sensors.org
-S:	Maintained
-F:	drivers/hwmon/asb100.c
-
 ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
 M:	Dan Williams <djbw@fb.com>
 W:	http://sourceforge.net/projects/xscaleiop
@@ -1467,6 +1461,12 @@ F: drivers/dma/at_hdmac.c
 F:	drivers/dma/at_hdmac_regs.h
 F:	include/linux/platform_data/dma-atmel.h
 
+ATMEL I2C DRIVER
+M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+L:	linux-i2c@vger.kernel.org
+S:	Supported
+F:	drivers/i2c/busses/i2c-at91.c
+
 ATMEL ISI DRIVER
 M:	Josh Wu <josh.wu@atmel.com>
 L:	linux-media@vger.kernel.org
@@ -2629,7 +2629,7 @@ F: include/uapi/drm/
 
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
 M:	Daniel Vetter <daniel.vetter@ffwll.ch>
-L:	intel-gfx@lists.freedesktop.org (subscribers-only)
+L:	intel-gfx@lists.freedesktop.org
 L:	dri-devel@lists.freedesktop.org
 T:	git git://people.freedesktop.org/~danvet/drm-intel
 S:	Supported
@@ -3242,6 +3242,12 @@ F: Documentation/firmware_class/
 F:	drivers/base/firmware*.c
 F:	include/linux/firmware.h
 
+FLASHSYSTEM DRIVER (IBM FlashSystem 70/80 PCI SSD Flash Card)
+M:	Joshua Morris <josh.h.morris@us.ibm.com>
+M:	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+S:	Maintained
+F:	drivers/block/rsxx/
+
 FLOPPY DRIVER
 M:	Jiri Kosina <jkosina@suse.cz>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
@@ -3851,7 +3857,7 @@ F: drivers/i2c/busses/i2c-ismt.c
 F:	Documentation/i2c/busses/i2c-ismt
 
 I2C/SMBUS STUB DRIVER
-M:	"Mark M. Hoffman" <mhoffman@lightlink.com>
+M:	Jean Delvare <khali@linux-fr.org>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
 F:	drivers/i2c/i2c-stub.c
@@ -5647,6 +5653,14 @@ S: Maintained
 F:	drivers/video/riva/
 F:	drivers/video/nvidia/
 
+NVM EXPRESS DRIVER
+M:	Matthew Wilcox <willy@linux.intel.com>
+L:	linux-nvme@lists.infradead.org
+T:	git git://git.infradead.org/users/willy/linux-nvme.git
+S:	Supported
+F:	drivers/block/nvme.c
+F:	include/linux/nvme.h
+
 OMAP SUPPORT
 M:	Tony Lindgren <tony@atomide.com>
 L:	linux-omap@vger.kernel.org
@@ -5675,7 +5689,7 @@ S: Maintained
 F:	arch/arm/*omap*/*clock*
 
 OMAP POWER MANAGEMENT SUPPORT
-M:	Kevin Hilman <khilman@ti.com>
+M:	Kevin Hilman <khilman@deeprootsystems.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
 F:	arch/arm/*omap*/*pm*
@@ -5769,7 +5783,7 @@ F: arch/arm/*omap*/usb*
 
 OMAP GPIO DRIVER
 M:	Santosh Shilimkar <santosh.shilimkar@ti.com>
-M:	Kevin Hilman <khilman@ti.com>
+M:	Kevin Hilman <khilman@deeprootsystems.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
 F:	drivers/gpio/gpio-omap.c
@@ -6201,7 +6215,7 @@ F: include/linux/power_supply.h
 F:	drivers/power/
 
 PNP SUPPORT
-M:	Adam Belay <abelay@mit.edu>
+M:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 M:	Bjorn Helgaas <bhelgaas@google.com>
 S:	Maintained
 F:	drivers/pnp/
@@ -6543,12 +6557,6 @@ S: Maintained
 F:	Documentation/blockdev/ramdisk.txt
 F:	drivers/block/brd.c
 
-RAMSAM DRIVER (IBM RamSan 70/80 PCI SSD Flash Card)
-M:	Joshua Morris <josh.h.morris@us.ibm.com>
-M:	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-S:	Maintained
-F:	drivers/block/rsxx/
-
 RANDOM NUMBER DRIVER
 M:	Theodore Ts'o" <tytso@mit.edu>
 S:	Maintained
@@ -7165,7 +7173,7 @@ F: arch/arm/mach-s3c2410/bast-irq.c
 
 TI DAVINCI MACHINE SUPPORT
 M:	Sekhar Nori <nsekhar@ti.com>
-M:	Kevin Hilman <khilman@ti.com>
+M:	Kevin Hilman <khilman@deeprootsystems.com>
 L:	davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
 T:	git git://gitorious.org/linux-davinci/linux-davinci.git
 Q:	http://patchwork.kernel.org/project/linux-davinci/list/
@@ -7198,13 +7206,6 @@ L: netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/sis/sis900.*
 
-SIS 96X I2C/SMBUS DRIVER
-M:	"Mark M. Hoffman" <mhoffman@lightlink.com>
-L:	linux-i2c@vger.kernel.org
-S:	Maintained
-F:	Documentation/i2c/busses/i2c-sis96x
-F:	drivers/i2c/busses/i2c-sis96x.c
-
 SIS FRAMEBUFFER DRIVER
 M:	Thomas Winischhofer <thomas@winischhofer.net>
 W:	http://www.winischhofer.net/linuxsisvga.shtml
@@ -7282,7 +7283,7 @@ F: Documentation/hwmon/sch5627
 F:	drivers/hwmon/sch5627.c
 
 SMSC47B397 HARDWARE MONITOR DRIVER
-M:	"Mark M. Hoffman" <mhoffman@lightlink.com>
+M:	Jean Delvare <khali@linux-fr.org>
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	Documentation/hwmon/smsc47b397
@@ -7705,9 +7706,10 @@ F: include/linux/swiotlb.h
 
 SYNOPSYS ARC ARCHITECTURE
 M:	Vineet Gupta <vgupta@synopsys.com>
-L:	linux-snps-arc@vger.kernel.org
 S:	Supported
 F:	arch/arc/
+F:	Documentation/devicetree/bindings/arc/
+F:	drivers/tty/serial/arc-uart.c
 
 SYSV FILESYSTEM
 M:	Christoph Hellwig <hch@infradead.org>
diff --git a/Makefile b/Makefile
index 22113a77f8ed..58a165b02af1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 31f77aec0823..45b8e0cea176 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		sg->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
 					       s->length, dir);
 
 	return nents;
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index f4c8d36ebecb..a26282857683 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *);
  */
 #define ELF_PLATFORM	(NULL)
 
-#define SET_PERSONALITY(ex) \
-	set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 #endif
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 23daa326fc9b..eb2ae53187d9 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -415,7 +415,7 @@
  *-------------------------------------------------------------*/
 .macro SAVE_ALL_EXCEPTION marker
 
-	st	\marker, [sp, 8]
+	st	\marker, [sp, 8]	/* orig_r8 */
 	st	r0, [sp, 4]	/* orig_r0, needed only for sys calls */
 
 	/* Restore r9 used to code the early prologue */
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
index f3c4934f0ca9..4930957ca3d3 100644
--- a/arch/arc/include/asm/kgdb.h
+++ b/arch/arc/include/asm/kgdb.h
@@ -13,7 +13,7 @@
 
 #ifdef CONFIG_KGDB
 
-#include <asm/user.h>
+#include <asm/ptrace.h>
 
 /* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
  * register API yet */
@@ -53,9 +53,7 @@ enum arc700_linux_regnums {
 };
 
 #else
-static inline void kgdb_trap(struct pt_regs *regs, int param)
-{
-}
+#define kgdb_trap(regs, param)
 #endif
 
 #endif	/* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 8ae783d20a81..6179de7e07c2 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -123,7 +123,7 @@ static inline long regs_return_value(struct pt_regs *regs)
 #define orig_r8_IS_SCALL		0x0001
 #define orig_r8_IS_SCALL_RESTARTED	0x0002
 #define orig_r8_IS_BRKPT		0x0004
-#define orig_r8_IS_EXCPN		0x0004
+#define orig_r8_IS_EXCPN		0x0008
 #define orig_r8_IS_IRQ1			0x0010
 #define orig_r8_IS_IRQ2			0x0020
 
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index e53a5340ba4f..dd785befe7fd 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -16,8 +16,6 @@
 #include <linux/types.h>
 
 int sys_clone_wrapper(int, int, int, int, int);
-int sys_fork_wrapper(void);
-int sys_vfork_wrapper(void);
 int sys_cacheflush(uint32_t, uint32_t uint32_t);
 int sys_arc_settls(void *);
 int sys_arc_gettls(void);
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 6afa4f702075..30333cec0fef 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -28,14 +28,14 @@
 */
 struct user_regs_struct {
 
-	struct scratch {
+	struct {
 		long pad;
 		long bta, lp_start, lp_end, lp_count;
 		long status32, ret, blink, fp, gp;
 		long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
 		long sp;
 	} scratch;
-	struct callee {
+	struct {
 		long pad;
 		long r25, r24, r23, r22, r21, r20;
 		long r19, r18, r17, r16, r15, r14, r13;
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index ef6800ba2f03..91eeab81f52d 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -452,7 +452,7 @@ tracesys:
 	; using ERET won't work since next-PC has already committed
 	lr	r12, [efa]
 	GET_CURR_TASK_FIELD_PTR	TASK_THREAD, r11
-	st	r12, [r11, THREAD_FAULT_ADDR]
+	st	r12, [r11, THREAD_FAULT_ADDR]	; thread.fault_address
 
 	; PRE Sys Call Ptrace hook
 	mov	r0, sp	; pt_regs needed
@@ -792,31 +792,6 @@ ARC_EXIT ret_from_fork
 
 ;################### Special Sys Call Wrappers ##########################
 
-; TBD: call do_fork directly from here
-ARC_ENTRY sys_fork_wrapper
-	SAVE_CALLEE_SAVED_USER
-	bl	@sys_fork
-	DISCARD_CALLEE_SAVED_USER
-
-	GET_CURR_THR_INFO_FLAGS	r10
-	btst	r10, TIF_SYSCALL_TRACE
-	bnz	tracesys_exit
-
-	b	ret_from_system_call
-ARC_EXIT sys_fork_wrapper
-
-ARC_ENTRY sys_vfork_wrapper
-	SAVE_CALLEE_SAVED_USER
-	bl	@sys_vfork
-	DISCARD_CALLEE_SAVED_USER
-
-	GET_CURR_THR_INFO_FLAGS	r10
-	btst	r10, TIF_SYSCALL_TRACE
-	bnz	tracesys_exit
-
-	b	ret_from_system_call
-ARC_EXIT sys_vfork_wrapper
-
 ARC_ENTRY sys_clone_wrapper
 	SAVE_CALLEE_SAVED_USER
 	bl	@sys_clone
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
index 2888ba5be47e..52bdc83c1495 100644
--- a/arch/arc/kernel/kgdb.c
+++ b/arch/arc/kernel/kgdb.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/kgdb.h>
+#include <linux/sched.h>
 #include <asm/disasm.h>
 #include <asm/cacheflush.h>
 
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index dc0f968dae0a..2d95ac07df7b 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -232,10 +232,8 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 
 	n += scnprintf(buf + n, len - n, "\n");
 
-#ifdef _ASM_GENERIC_UNISTD_H
 	n += scnprintf(buf + n, len - n,
-		       "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
-#endif
+		       "OS ABI [v3]\t: no-legacy-syscalls\n");
 
 	return buf;
 }
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c
index f6bdd07583f3..9d6c1ca26af6 100644
--- a/arch/arc/kernel/sys.c
+++ b/arch/arc/kernel/sys.c
@@ -6,8 +6,6 @@
 #include <asm/syscalls.h>
 
 #define sys_clone	sys_clone_wrapper
-#define sys_fork	sys_fork_wrapper
-#define sys_vfork	sys_vfork_wrapper
 
 #undef __SYSCALL
 #define __SYSCALL(nr, call) [nr] = (call),
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2c3bdce15134..13b739469c51 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -49,7 +49,6 @@ config ARM
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UID16
-	select VIRT_TO_BUS
 	select KTIME_SCALAR
 	select PERF_USE_VMALLOC
 	select RTC_LIB
@@ -743,6 +742,7 @@ config ARCH_RPC
 	select NEED_MACH_IO_H
 	select NEED_MACH_MEMORY_H
 	select NO_IOPORT
+	select VIRT_TO_BUS
 	help
 	  On the Acorn Risc-PC, Linux can support the internal IDE disk and
 	  CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -878,6 +878,7 @@ config ARCH_SHARK
 	select ISA_DMA
 	select NEED_MACH_MEMORY_H
 	select PCI
+	select VIRT_TO_BUS
 	select ZONE_DMA
 	help
 	  Support for the StrongARM based Digital DNARD machine, also known
@@ -1005,12 +1006,12 @@ config ARCH_MULTI_V4_V5
 	bool
 
 config ARCH_MULTI_V6
-	bool "ARMv6 based platforms (ARM11, Scorpion, ...)"
+	bool "ARMv6 based platforms (ARM11)"
 	select ARCH_MULTI_V6_V7
 	select CPU_V6
 
 config ARCH_MULTI_V7
-	bool "ARMv7 based platforms (Cortex-A, PJ4, Krait)"
+	bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)"
 	default y
 	select ARCH_MULTI_V6_V7
 	select ARCH_VEXPRESS
@@ -1461,10 +1462,6 @@ config ISA_DMA
 	bool
 	select ISA_DMA_API
 
-config ARCH_NO_VIRT_TO_BUS
-	def_bool y
-	depends on !ARCH_RPC && !ARCH_NETWINDER && !ARCH_SHARK
-
 # Select ISA DMA interface
 config ISA_DMA_API
 	bool
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index ecfcdba2d17c..9b31f4311ea2 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -495,6 +495,7 @@ config DEBUG_IMX_UART_PORT
 		DEBUG_IMX53_UART || \
 		DEBUG_IMX6Q_UART
 	default 1
+	depends on ARCH_MXC
 	help
 	  Choose UART port on which kernel low-level debug messages
 	  should be output.
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index aa98e641931f..a98c0d50fbbe 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -238,8 +238,32 @@
 			nand {
 				pinctrl_nand: nand-0 {
 					atmel,pins =
-						<3 4 0x0 0x1	/* PD5 gpio RDY pin pull_up */
-						 3 5 0x0 0x1>;	/* PD4 gpio enable pin pull_up */
+						<3 0 0x1 0x0	/* PD0 periph A Read Enable */
+						 3 1 0x1 0x0	/* PD1 periph A Write Enable */
+						 3 2 0x1 0x0	/* PD2 periph A Address Latch Enable */
+						 3 3 0x1 0x0	/* PD3 periph A Command Latch Enable */
+						 3 4 0x0 0x1	/* PD4 gpio Chip Enable pin pull_up */
+						 3 5 0x0 0x1	/* PD5 gpio RDY/BUSY pin pull_up */
+						 3 6 0x1 0x0	/* PD6 periph A Data bit 0 */
+						 3 7 0x1 0x0	/* PD7 periph A Data bit 1 */
+						 3 8 0x1 0x0	/* PD8 periph A Data bit 2 */
+						 3 9 0x1 0x0	/* PD9 periph A Data bit 3 */
+						 3 10 0x1 0x0	/* PD10 periph A Data bit 4 */
+						 3 11 0x1 0x0	/* PD11 periph A Data bit 5 */
+						 3 12 0x1 0x0	/* PD12 periph A Data bit 6 */
+						 3 13 0x1 0x0>;	/* PD13 periph A Data bit 7 */
+				};
+
+				pinctrl_nand_16bits: nand_16bits-0 {
+					atmel,pins =
+						<3 14 0x1 0x0	/* PD14 periph A Data bit 8 */
+						 3 15 0x1 0x0	/* PD15 periph A Data bit 9 */
+						 3 16 0x1 0x0	/* PD16 periph A Data bit 10 */
+						 3 17 0x1 0x0	/* PD17 periph A Data bit 11 */
+						 3 18 0x1 0x0	/* PD18 periph A Data bit 12 */
+						 3 19 0x1 0x0	/* PD19 periph A Data bit 13 */
+						 3 20 0x1 0x0	/* PD20 periph A Data bit 14 */
+						 3 21 0x1 0x0>;	/* PD21 periph A Data bit 15 */
 				};
 			};
 
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index e1347fceb5bc..1a62bcf18aa3 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -275,18 +275,27 @@
 			compatible = "arm,pl330", "arm,primecell";
 			reg = <0x12680000 0x1000>;
 			interrupts = <0 35 0>;
+			#dma-cells = <1>;
+			#dma-channels = <8>;
+			#dma-requests = <32>;
 		};
 
 		pdma1: pdma@12690000 {
 			compatible = "arm,pl330", "arm,primecell";
 			reg = <0x12690000 0x1000>;
 			interrupts = <0 36 0>;
+			#dma-cells = <1>;
+			#dma-channels = <8>;
+			#dma-requests = <32>;
 		};
 
 		mdma1: mdma@12850000 {
 			compatible = "arm,pl330", "arm,primecell";
 			reg = <0x12850000 0x1000>;
 			interrupts = <0 34 0>;
+			#dma-cells = <1>;
+			#dma-channels = <8>;
+			#dma-requests = <1>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index 5f3562ad6746..9a99755920c0 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -142,12 +142,18 @@
 		compatible = "arm,pl330", "arm,primecell";
 		reg = <0x120000 0x1000>;
 		interrupts = <0 34 0>;
+		#dma-cells = <1>;
+		#dma-channels = <8>;
+		#dma-requests = <32>;
 	};
 
 	pdma1: pdma@121B0000 {
 		compatible = "arm,pl330", "arm,primecell";
 		reg = <0x121000 0x1000>;
 		interrupts = <0 35 0>;
+		#dma-cells = <1>;
+		#dma-channels = <8>;
+		#dma-requests = <32>;
 	};
 };
 
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 48d00a099ce3..3d3f64d2111a 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -385,7 +385,7 @@
 
 	spi@7000d800 {
 		compatible = "nvidia,tegra20-slink";
-		reg = <0x7000d480 0x200>;
+		reg = <0x7000d800 0x200>;
 		interrupts = <0 83 0x04>;
 		nvidia,dma-request-selector = <&apbdma 17>;
 		#address-cells = <1>;
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 9d87a3ffe998..dbf46c272562 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -372,7 +372,7 @@
 
 	spi@7000d800 {
 		compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
-		reg = <0x7000d480 0x200>;
+		reg = <0x7000d800 0x200>;
 		interrupts = <0 83 0x04>;
 		nvidia,dma-request-selector = <&apbdma 17>;
 		#address-cells = <1>;
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 31644f1978d5..79078edbb9bc 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -480,7 +480,7 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
 	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
 			  CLOCK_EVT_FEAT_PERIODIC |
 			  CLOCK_EVT_FEAT_DUMMY;
-	evt->rating	= 400;
+	evt->rating	= 100;
 	evt->mult	= 1;
 	evt->set_mode	= broadcast_timer_set_mode;
 
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index d912e7397ecc..94b0650ea98f 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -14,31 +14,15 @@
 
 	.text
 	.align	5
-	.word	0
-
-1:	subs	r2, r2, #4		@ 1 do we have enough
-	blt	5f			@ 1 bytes to align with?
-	cmp	r3, #2			@ 1
-	strltb	r1, [ip], #1		@ 1
-	strleb	r1, [ip], #1		@ 1
-	strb	r1, [ip], #1		@ 1
-	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
-/*
- * The pointer is now aligned and the length is adjusted. Try doing the
- * memset again.
- */
 
 ENTRY(memset)
-/*
- * Preserve the contents of r0 for the return value.
- */
-	mov	ip, r0
-	ands	r3, ip, #3		@ 1 unaligned?
-	bne	1b			@ 1
+	ands	r3, r0, #3		@ 1 unaligned?
+	mov	ip, r0			@ preserve r0 as return value
+	bne	6f			@ 1
 /*
  * we know that the pointer in ip is aligned to a word boundary.
  */
-	orr	r1, r1, r1, lsl #8
+1:	orr	r1, r1, r1, lsl #8
 	orr	r1, r1, r1, lsl #16
 	mov	r3, r1
 	cmp	r2, #16
@@ -127,4 +111,13 @@ ENTRY(memset)
 	tst	r2, #1
 	strneb	r1, [ip], #1
 	mov	pc, lr
+
+6:	subs	r2, r2, #4		@ 1 do we have enough
+	blt	5b			@ 1 bytes to align with?
+	cmp	r3, #2			@ 1
+	strltb	r1, [ip], #1		@ 1
+	strleb	r1, [ip], #1		@ 1
+	strb	r1, [ip], #1		@ 1
+	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
+	b	1b
 ENDPROC(memset)
diff --git a/arch/arm/mach-at91/include/mach/gpio.h b/arch/arm/mach-at91/include/mach/gpio.h
index eed465ab0dd7..5fc23771c154 100644
--- a/arch/arm/mach-at91/include/mach/gpio.h
+++ b/arch/arm/mach-at91/include/mach/gpio.h
@@ -209,6 +209,14 @@ extern int at91_get_gpio_value(unsigned pin);
 extern void at91_gpio_suspend(void);
 extern void at91_gpio_resume(void);
 
+#ifdef CONFIG_PINCTRL_AT91
+extern void at91_pinctrl_gpio_suspend(void);
+extern void at91_pinctrl_gpio_resume(void);
+#else
+static inline void at91_pinctrl_gpio_suspend(void) {}
+static inline void at91_pinctrl_gpio_resume(void) {}
+#endif
+
 #endif	/* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c
index 8e210262aeee..e0ca59171022 100644
--- a/arch/arm/mach-at91/irq.c
+++ b/arch/arm/mach-at91/irq.c
@@ -92,23 +92,21 @@ static int at91_aic_set_wake(struct irq_data *d, unsigned value)
 
 void at91_irq_suspend(void)
 {
-	int i = 0, bit;
+	int bit = -1;
 
 	if (has_aic5()) {
 		/* disable enabled irqs */
-		while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
+		while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
 			at91_aic_write(AT91_AIC5_SSR,
 				       bit & AT91_AIC5_INTSEL_MSK);
 			at91_aic_write(AT91_AIC5_IDCR, 1);
-			i = bit;
 		}
 		/* enable wakeup irqs */
-		i = 0;
-		while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
+		bit = -1;
+		while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
 			at91_aic_write(AT91_AIC5_SSR,
 				       bit & AT91_AIC5_INTSEL_MSK);
 			at91_aic_write(AT91_AIC5_IECR, 1);
-			i = bit;
 		}
 	} else {
 		at91_aic_write(AT91_AIC_IDCR, *backups);
@@ -118,23 +116,21 @@ void at91_irq_suspend(void)
 
 void at91_irq_resume(void)
 {
-	int i = 0, bit;
+	int bit = -1;
 
 	if (has_aic5()) {
 		/* disable wakeup irqs */
-		while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
+		while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
 			at91_aic_write(AT91_AIC5_SSR,
 				       bit & AT91_AIC5_INTSEL_MSK);
 			at91_aic_write(AT91_AIC5_IDCR, 1);
-			i = bit;
 		}
 		/* enable irqs disabled for suspend */
-		i = 0;
-		while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
+		bit = -1;
+		while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
 			at91_aic_write(AT91_AIC5_SSR,
 				       bit & AT91_AIC5_INTSEL_MSK);
 			at91_aic_write(AT91_AIC5_IECR, 1);
-			i = bit;
 		}
 	} else {
 		at91_aic_write(AT91_AIC_IDCR, *wakeups);
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index adb6db888a1f..73f1f250403a 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -201,7 +201,10 @@ extern u32 at91_slow_clock_sz;
 
 static int at91_pm_enter(suspend_state_t state)
 {
-	at91_gpio_suspend();
+	if (of_have_populated_dt())
+		at91_pinctrl_gpio_suspend();
+	else
+		at91_gpio_suspend();
 	at91_irq_suspend();
 
 	pr_debug("AT91: PM - wake mask %08x, pm state %d\n",
@@ -286,7 +289,10 @@ static int at91_pm_enter(suspend_state_t state)
 error:
 	target_state = PM_SUSPEND_ON;
 	at91_irq_resume();
-	at91_gpio_resume();
+	if (of_have_populated_dt())
+		at91_pinctrl_gpio_resume();
+	else
+		at91_gpio_resume();
 	return 0;
 }
 
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index a685e9706b7b..45b7c71d9cc1 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -743,6 +743,9 @@ EXPORT_SYMBOL(edma_free_channel);
  */
 int edma_alloc_slot(unsigned ctlr, int slot)
 {
+	if (!edma_cc[ctlr])
+		return -EINVAL;
+
 	if (slot >= 0)
 		slot = EDMA_CHAN_SLOT(slot);
 
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index abda5a18a664..0f2111a11315 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -67,6 +67,7 @@ config ARCH_NETWINDER
 	select ISA
 	select ISA_DMA
 	select PCI
+	select VIRT_TO_BUS
 	help
 	  Say Y here if you intend to run this kernel on the Rebel.COM
 	  NetWinder.  Information about this machine can be found at:
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index 74e3a34d78b8..e13a8fa5e62c 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -264,6 +264,7 @@ int __init mx35_clocks_init(void)
 	clk_prepare_enable(clk[gpio3_gate]);
 	clk_prepare_enable(clk[iim_gate]);
 	clk_prepare_enable(clk[emi_gate]);
+	clk_prepare_enable(clk[max_gate]);
 
 	/*
 	 * SCC is needed to boot via mmc after a watchdog reset. The clock code
diff --git a/arch/arm/mach-imx/imx25-dt.c b/arch/arm/mach-imx/imx25-dt.c
index 03b65e5ea541..82348391582a 100644
--- a/arch/arm/mach-imx/imx25-dt.c
+++ b/arch/arm/mach-imx/imx25-dt.c
@@ -27,6 +27,11 @@ static const char * const imx25_dt_board_compat[] __initconst = {
 	NULL
 };
 
+static void __init imx25_timer_init(void)
+{
+	mx25_clocks_init_dt();
+}
+
 DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
 	.map_io		= mx25_map_io,
 	.init_early	= imx25_init_early,
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index d1e2d595e79c..f62b68d926f4 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/platform_device.h>
 #include <linux/gpio.h>
 
 #include <asm/mach/arch.h>
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 3218f1f2c0e0..e7b781d3788f 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = {
 		.lower_margin	= 4,
 		.hsync_len	= 1,
 		.vsync_len	= 1,
-		.sync		= FB_SYNC_DATA_ENABLE_HIGH_ACT |
-				  FB_SYNC_DOTCLK_FAILING_ACT,
 	},
 };
 
48 46
@@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = {
 		.lower_margin	= 10,
 		.hsync_len	= 10,
 		.vsync_len	= 10,
-		.sync		= FB_SYNC_DATA_ENABLE_HIGH_ACT |
-				  FB_SYNC_DOTCLK_FAILING_ACT,
 	},
 };
 
@@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = {
 		.lower_margin	= 45,
 		.hsync_len	= 1,
 		.vsync_len	= 1,
-		.sync		= FB_SYNC_DATA_ENABLE_HIGH_ACT,
 	},
 };
 
@@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = {
 		.lower_margin	= 13,
 		.hsync_len	= 48,
 		.vsync_len	= 3,
-		.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
-				  FB_SYNC_DATA_ENABLE_HIGH_ACT |
-				  FB_SYNC_DOTCLK_FAILING_ACT,
+		.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
 	},
 };
 
@@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = {
 		.lower_margin	= 0x15,
 		.hsync_len	= 64,
 		.vsync_len	= 4,
-		.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
-				  FB_SYNC_DATA_ENABLE_HIGH_ACT |
-				  FB_SYNC_DOTCLK_FAILING_ACT,
+		.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
 	},
 };
 
@@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = {
 		.lower_margin	= 2,
 		.hsync_len	= 15,
 		.vsync_len	= 15,
-		.sync		= FB_SYNC_DATA_ENABLE_HIGH_ACT
 	},
 };
 
@@ -259,6 +249,8 @@ static void __init imx23_evk_init(void)
 	mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes);
 	mxsfb_pdata.default_bpp = 32;
 	mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+	mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+			   MXSFB_SYNC_DOTCLK_FAILING_ACT;
 }
 
 static inline void enable_clk_enet_out(void)
@@ -278,6 +270,8 @@ static void __init imx28_evk_init(void)
 	mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes);
 	mxsfb_pdata.default_bpp = 32;
 	mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+	mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+			   MXSFB_SYNC_DOTCLK_FAILING_ACT;
 
 	mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0);
 }
@@ -297,6 +291,7 @@ static void __init m28evk_init(void)
 	mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes);
 	mxsfb_pdata.default_bpp = 16;
 	mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
+	mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
 }
 
 static void __init sc_sps1_init(void)
@@ -322,6 +317,8 @@ static void __init apx4devkit_init(void)
 	mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes);
 	mxsfb_pdata.default_bpp = 32;
 	mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+	mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+			   MXSFB_SYNC_DOTCLK_FAILING_ACT;
 }
 
 #define ENET0_MDC__GPIO_4_0	MXS_GPIO_NR(4, 0)
@@ -407,6 +404,7 @@ static void __init cfa10049_init(void)
 	mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
 	mxsfb_pdata.default_bpp = 32;
 	mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
+	mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
 }
 
 static void __init cfa10037_init(void)
@@ -423,6 +421,8 @@ static void __init apf28_init(void)
 	mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes);
 	mxsfb_pdata.default_bpp = 16;
 	mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT;
+	mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+			   MXSFB_SYNC_DOTCLK_FAILING_ACT;
 }
 
 static void __init mxs_machine_init(void)
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index fcdf52dbcc49..f051f53e35b7 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -214,11 +214,6 @@ static struct clk clk_pcmcdclk2 = {
 	.name		= "pcmcdclk",
 };
 
-static struct clk dummy_apb_pclk = {
-	.name		= "apb_pclk",
-	.id		= -1,
-};
-
 static struct clk *clkset_vpllsrc_list[] = {
 	[0] = &clk_fin_vpll,
 	[1] = &clk_sclk_hdmi27m,
@@ -305,18 +300,6 @@ static struct clk_ops clk_fout_apll_ops = {
 
 static struct clk init_clocks_off[] = {
 	{
-		.name		= "dma",
-		.devname	= "dma-pl330.0",
-		.parent		= &clk_hclk_psys.clk,
-		.enable		= s5pv210_clk_ip0_ctrl,
-		.ctrlbit	= (1 << 3),
-	}, {
-		.name		= "dma",
-		.devname	= "dma-pl330.1",
-		.parent		= &clk_hclk_psys.clk,
-		.enable		= s5pv210_clk_ip0_ctrl,
-		.ctrlbit	= (1 << 4),
-	}, {
 		.name		= "rot",
 		.parent		= &clk_hclk_dsys.clk,
 		.enable		= s5pv210_clk_ip0_ctrl,
@@ -573,6 +556,20 @@ static struct clk clk_hsmmc3 = {
 	.ctrlbit	= (1<<19),
 };
 
+static struct clk clk_pdma0 = {
+	.name		= "pdma0",
+	.parent		= &clk_hclk_psys.clk,
+	.enable		= s5pv210_clk_ip0_ctrl,
+	.ctrlbit	= (1 << 3),
+};
+
+static struct clk clk_pdma1 = {
+	.name		= "pdma1",
+	.parent		= &clk_hclk_psys.clk,
+	.enable		= s5pv210_clk_ip0_ctrl,
+	.ctrlbit	= (1 << 4),
+};
+
 static struct clk *clkset_uart_list[] = {
 	[6] = &clk_mout_mpll.clk,
 	[7] = &clk_mout_epll.clk,
@@ -1075,6 +1072,8 @@ static struct clk *clk_cdev[] = {
 	&clk_hsmmc1,
 	&clk_hsmmc2,
 	&clk_hsmmc3,
+	&clk_pdma0,
+	&clk_pdma1,
 };
 
 /* Clock initialisation code */
@@ -1333,6 +1332,8 @@ static struct clk_lookup s5pv210_clk_lookup[] = {
 	CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
 	CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
 	CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
+	CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0),
+	CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1),
 };
 
 void __init s5pv210_register_clocks(void)
@@ -1361,6 +1362,5 @@ void __init s5pv210_register_clocks(void)
 	for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++)
 		s3c_disable_clocks(clk_cdev[ptr], 1);
 
-	s3c24xx_register_clock(&dummy_apb_pclk);
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index 3a38f7b34b94..e373de44a8b6 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -845,7 +845,7 @@ static struct fimc_source_info goni_camera_sensors[] = {
 		.mux_id		= 0,
 		.flags		= V4L2_MBUS_PCLK_SAMPLE_FALLING |
 				  V4L2_MBUS_VSYNC_ACTIVE_LOW,
-		.bus_type	= FIMC_BUS_TYPE_ITU_601,
+		.fimc_bus_type	= FIMC_BUS_TYPE_ITU_601,
 		.board_info	= &noon010pc30_board_info,
 		.i2c_bus_num	= 0,
 		.clk_frequency	= 16000000UL,
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index cdcb799e802f..fec49ebc359a 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -32,6 +32,7 @@
 #include <linux/smsc911x.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/sh_hspi.h>
+#include <linux/mmc/host.h>
 #include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mfd/tmio.h>
 #include <linux/usb/otg.h>
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6828ef6ce80e..a0bd8a755bdf 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -576,7 +576,7 @@ load_ind:
 		/* x = ((*(frame + k)) & 0xf) << 2; */
 		ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
 		/* the interpreter should deal with the negative K */
-		if (k < 0)
+		if ((int)k < 0)
 			return -1;
 		/* offset in r1: we might have to take the slow path */
 		emit_mov_i(r_off, k, ctx);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index fd70a68387eb..9b6d19f74078 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -9,7 +9,6 @@ config ARM64
 	select CLONE_BACKWARDS
 	select COMMON_CLK
 	select GENERIC_CLOCKEVENTS
-	select GENERIC_HARDIRQS_NO_DEPRECATED
 	select GENERIC_IOMAP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 51493430f142..1a6bfe954d49 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -6,17 +6,6 @@ config FRAME_POINTER
 	bool
 	default y
 
-config DEBUG_ERRORS
-	bool "Verbose kernel error messages"
-	depends on DEBUG_KERNEL
-	help
-	  This option controls verbose debugging information which can be
-	  printed when the kernel detects an internal error. This debugging
-	  information is useful to kernel hackers when tracking down problems,
-	  but mostly meaningless to other people. It's safe to say Y unless
-	  you are concerned with the code size or don't want to see these
-	  messages.
-
 config DEBUG_STACK_USAGE
 	bool "Enable stack utilization instrumentation"
 	depends on DEBUG_KERNEL
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 9212c7880da7..09bef29f3a09 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -82,4 +82,3 @@ CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_INFO=y
 # CONFIG_FTRACE is not set
 CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_DEBUG_ERRORS=y
diff --git a/arch/arm64/include/asm/ucontext.h b/arch/arm64/include/asm/ucontext.h
index bde960720892..42e04c877428 100644
--- a/arch/arm64/include/asm/ucontext.h
+++ b/arch/arm64/include/asm/ucontext.h
@@ -22,7 +22,7 @@ struct ucontext {
 	stack_t		  uc_stack;
 	sigset_t	  uc_sigmask;
 	/* glibc uses a 1024-bit sigset_t */
-	__u8		  __unused[(1024 - sizeof(sigset_t)) / 8];
+	__u8		  __unused[1024 / 8 - sizeof(sigset_t)];
 	/* last for future expansion */
 	struct sigcontext uc_mcontext;
 };
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index cef3925eaf60..aa3e948f7885 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -40,7 +40,9 @@ EXPORT_SYMBOL(__copy_to_user);
 EXPORT_SYMBOL(__clear_user);
 
 	/* bitops */
+#ifdef CONFIG_SMP
 EXPORT_SYMBOL(__atomic_hash);
+#endif
 
 	/* physical memory */
 EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 7f4f3673f2bc..e393174fe859 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -549,7 +549,6 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
 			  sigset_t *set, struct pt_regs *regs)
 {
 	struct compat_rt_sigframe __user *frame;
-	compat_stack_t stack;
 	int err = 0;
 
 	frame = compat_get_sigframe(ka, regs, sizeof(*frame));
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 224b44ab534e..70b8cd4021c4 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
 {
 	unsigned long size, mask;
-	bool page64k = IS_ENABLED(ARM64_64K_PAGES);
+	bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index e34f565f595a..6f7dc8b7b35c 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -291,7 +291,6 @@ cpu_idle (void)
 		}
 
 		if (!need_resched()) {
-			void (*idle)(void);
 #ifdef CONFIG_SMP
 			min_xtp();
 #endif
@@ -299,9 +298,7 @@ cpu_idle (void)
 			if (mark_idle)
 				(*mark_idle)(1);
 
-			if (!idle)
-				idle = default_idle;
-			(*idle)();
+			default_idle();
 			if (mark_idle)
 				(*mark_idle)(0);
 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 80821512e9cc..ea5bb045983a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -90,6 +90,7 @@ config GENERIC_GPIO
 config PPC
 	bool
 	default y
+	select BINFMT_ELF
 	select OF
 	select OF_EARLY_FLATTREE
 	select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 2fdb47a19efd..b59e06f507ea 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -343,17 +343,16 @@ extern void slb_set_size(u16 size);
 /*
  * VSID allocation (256MB segment)
  *
- * We first generate a 38-bit "proto-VSID". For kernel addresses this
- * is equal to the ESID | 1 << 37, for user addresses it is:
- *	(context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)
+ * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
+ * from mmu context id and effective segment id of the address.
  *
- * This splits the proto-VSID into the below range
- *  0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range
- *  2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range
- *
- * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1
- * That is, we assign half of the space to user processes and half
- * to the kernel.
+ * For user processes max context id is limited to ((1ul << 19) - 5)
+ * for kernel space, we use the top 4 context ids to map address as below
+ * NOTE: each context only support 64TB now.
+ * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
  *
  * The proto-VSIDs are then scrambled into real VSIDs with the
  * multiplicative hash:
@@ -363,41 +362,49 @@ extern void slb_set_size(u16 size);
  * VSID_MULTIPLIER is prime, so in particular it is
  * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
  * Because the modulus is 2^n-1 we can compute it efficiently without
- * a divide or extra multiply (see below).
- *
- * This scheme has several advantages over older methods:
- *
- *	- We have VSIDs allocated for every kernel address
- *   (i.e. everything above 0xC000000000000000), except the very top
- *   segment, which simplifies several things.
+ * a divide or extra multiply (see below). The scramble function gives
+ * robust scattering in the hash table (at least based on some initial
+ * results).
  *
- *	- We allow for USER_ESID_BITS significant bits of ESID and
- *   CONTEXT_BITS  bits of context for user addresses.
- *   i.e. 64T (46 bits) of address space for up to half a million contexts.
+ * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
+ * bad address. This enables us to consolidate bad address handling in
+ * hash_page.
  *
- *	- The scramble function gives robust scattering in the hash
- *   table (at least based on some initial results).  The previous
- *   method was more susceptible to pathological cases giving excessive
- *   hash collisions.
+ * We also need to avoid the last segment of the last context, because that
+ * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
+ * because of the modulo operation in vsid scramble. But the vmemmap
+ * (which is what uses region 0xf) will never be close to 64TB in size
+ * (it's 56 bytes per page of system memory).
  */
 
+#define CONTEXT_BITS		19
+#define ESID_BITS		18
+#define ESID_BITS_1T		6
+
+/*
+ * 256MB segment
+ * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments
+ * available for user + kernel mapping. The top 4 contexts are used for
+ * kernel mapping. Each segment contains 2^28 bytes. Each
+ * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
+ * (19 == 37 + 28 - 46).
+ */
+#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 5)
+
 /*
  * This should be computed such that protovosid * vsid_mulitplier
  * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
  */
 #define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
-#define VSID_BITS_256M		38
+#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
 #define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)
 
 #define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
-#define VSID_BITS_1T		26
+#define VSID_BITS_1T		(CONTEXT_BITS + ESID_BITS_1T)
 #define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)
 
-#define CONTEXT_BITS		19
-#define USER_ESID_BITS		18
-#define USER_ESID_BITS_1T	6
 
-#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
+#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))
 
 /*
  * This macro generates asm code to compute the VSID scramble
@@ -421,7 +428,8 @@ extern void slb_set_size(u16 size);
 	srdi	rx,rt,VSID_BITS_##size;					\
 	clrldi	rt,rt,(64-VSID_BITS_##size);				\
 	add	rt,rt,rx;		/* add high and low bits */	\
-	/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
+	/* NOTE: explanation based on VSID_BITS_##size = 36		\
+	 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
 	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
 	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
 	 * the bit clear, r3 already has the answer we want, if it	\
@@ -513,34 +521,6 @@ typedef struct {
 	})
 #endif /* 1 */
 
-/*
- * This is only valid for addresses >= PAGE_OFFSET
- * The proto-VSID space is divided into two class
- * User:   0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1
- * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
- *
- * With KERNEL_START at 0xc000000000000000, the proto vsid for
- * the kernel ends up with 0xc00000000 (36 bits). With 64TB
- * support we need to have kernel proto-VSID in the
- * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
- */
-static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
-{
-	unsigned long proto_vsid;
-	/*
-	 * We need to make sure proto_vsid for the kernel is
-	 * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
-	 */
-	if (ssize == MMU_SEGSIZE_256M) {
-		proto_vsid = ea >> SID_SHIFT;
-		proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
-		return vsid_scramble(proto_vsid, 256M);
-	}
-	proto_vsid = ea >> SID_SHIFT_1T;
-	proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
-	return vsid_scramble(proto_vsid, 1T);
-}
-
 /* Returns the segment size indicator for a user address */
 static inline int user_segment_size(unsigned long addr)
 {
@@ -550,17 +530,41 @@ static inline int user_segment_size(unsigned long addr)
 	return MMU_SEGSIZE_256M;
 }
 
-/* This is only valid for user addresses (which are below 2^44) */
 static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
 				     int ssize)
 {
+	/*
+	 * Bad address. We return VSID 0 for that
+	 */
+	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
+		return 0;
+
 	if (ssize == MMU_SEGSIZE_256M)
-		return vsid_scramble((context << USER_ESID_BITS)
+		return vsid_scramble((context << ESID_BITS)
 				     | (ea >> SID_SHIFT), 256M);
-	return vsid_scramble((context << USER_ESID_BITS_1T)
+	return vsid_scramble((context << ESID_BITS_1T)
 			     | (ea >> SID_SHIFT_1T), 1T);
 }
 
+/*
+ * This is only valid for addresses >= PAGE_OFFSET
+ *
+ * For kernel space, we use the top 4 context ids to map address as below
+ * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
+ */
+static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
+{
+	unsigned long context;
+
+	/*
+	 * kernel take the top 4 context from the available range
+	 */
+	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
+	return get_vsid(context, ea, ssize);
+}
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_MMU_HASH64_H_ */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 75a3d71b895d..19599ef352bc 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_features		= CPU_FTRS_PPC970,
 		.cpu_user_features	= COMMON_USER_POWER4 |
 			PPC_FEATURE_HAS_ALTIVEC_COMP,
-		.mmu_features		= MMU_FTR_HPTE_TABLE,
+		.mmu_features		= MMU_FTRS_PPC970,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index f3eab8594d9f..d44a571e45a7 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -23,8 +23,10 @@
 #include <asm/code-patching.h>
 #include <asm/machdep.h>
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
 extern void epapr_ev_idle(void);
 extern u32 epapr_ev_idle_start[];
+#endif
 
 bool epapr_paravirt_enabled;
 
@@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void)
 
 	for (i = 0; i < (len / 4); i++) {
 		patch_instruction(epapr_hypercall_start + i, insts[i]);
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
 		patch_instruction(epapr_ev_idle_start + i, insts[i]);
+#endif
 	}
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
 	if (of_get_property(hyper_node, "has-idle", NULL))
 		ppc_md.power_save = epapr_ev_idle;
+#endif
 
 	epapr_paravirt_enabled = true;
 
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 87ef8f5ee5bc..56bd92362ce1 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1066,78 +1066,6 @@ unrecov_user_slb:
 #endif /* __DISABLED__ */
 
 
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r12 contain the saved SRR1, SRR0 is still ready for return
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
- */
-_GLOBAL(slb_miss_realmode)
-	mflr	r10
-#ifdef CONFIG_RELOCATABLE
-	mtctr	r11
-#endif
-
-	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
-	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
-
-	bl	.slb_allocate_realmode
-
-	/* All done -- return from exception. */
-
-	ld	r10,PACA_EXSLB+EX_LR(r13)
-	ld	r3,PACA_EXSLB+EX_R3(r13)
-	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
-
-	mtlr	r10
-
-	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
-	beq-	2f
-
-.machine	push
-.machine	"power4"
-	mtcrf	0x80,r9
-	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
-.machine	pop
-
-	RESTORE_PPR_PACA(PACA_EXSLB, r9)
-	ld	r9,PACA_EXSLB+EX_R9(r13)
-	ld	r10,PACA_EXSLB+EX_R10(r13)
-	ld	r11,PACA_EXSLB+EX_R11(r13)
-	ld	r12,PACA_EXSLB+EX_R12(r13)
-	ld	r13,PACA_EXSLB+EX_R13(r13)
-	rfid
-	b	.	/* prevent speculative execution */
-
-2:	mfspr	r11,SPRN_SRR0
-	ld	r10,PACAKBASE(r13)
-	LOAD_HANDLER(r10,unrecov_slb)
-	mtspr	SPRN_SRR0,r10
-	ld	r10,PACAKMSR(r13)
-	mtspr	SPRN_SRR1,r10
-	rfid
-	b	.
-
-unrecov_slb:
-	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
-	DISABLE_INTS
-	bl	.save_nvgprs
-1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
-	b	1b
-
-
-#ifdef CONFIG_PPC_970_NAP
-power4_fixup_nap:
-	andc	r9,r9,r10
-	std	r9,TI_LOCAL_FLAGS(r11)
-	ld	r10,_LINK(r1)		/* make idle task do the */
-	std	r10,_NIP(r1)		/* equivalent of a blr */
-	blr
-#endif
-
 	.align	7
 	.globl alignment_common
 alignment_common:
@@ -1336,6 +1264,78 @@ _GLOBAL(opal_mc_secondary_handler)
 
 
 /*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r12 contain the saved SRR1, SRR0 is still ready for return
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(slb_miss_realmode)
+	mflr	r10
+#ifdef CONFIG_RELOCATABLE
+	mtctr	r11
+#endif
+
+	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
+	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
+
+	bl	.slb_allocate_realmode
+
+	/* All done -- return from exception. */
+
+	ld	r10,PACA_EXSLB+EX_LR(r13)
+	ld	r3,PACA_EXSLB+EX_R3(r13)
+	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
+
+	mtlr	r10
+
+	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
+	beq-	2f
+
+.machine	push
+.machine	"power4"
+	mtcrf	0x80,r9
+	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
+.machine	pop
+
+	RESTORE_PPR_PACA(PACA_EXSLB, r9)
+	ld	r9,PACA_EXSLB+EX_R9(r13)
+	ld	r10,PACA_EXSLB+EX_R10(r13)
+	ld	r11,PACA_EXSLB+EX_R11(r13)
+	ld	r12,PACA_EXSLB+EX_R12(r13)
+	ld	r13,PACA_EXSLB+EX_R13(r13)
+	rfid
+	b	.	/* prevent speculative execution */
+
+2:	mfspr	r11,SPRN_SRR0
+	ld	r10,PACAKBASE(r13)
+	LOAD_HANDLER(r10,unrecov_slb)
+	mtspr	SPRN_SRR0,r10
+	ld	r10,PACAKMSR(r13)
+	mtspr	SPRN_SRR1,r10
+	rfid
+	b	.
+
+unrecov_slb:
+	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+	DISABLE_INTS
+	bl	.save_nvgprs
+1:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.unrecoverable_exception
+	b	1b
+
+
+#ifdef CONFIG_PPC_970_NAP
+power4_fixup_nap:
+	andc	r9,r9,r10
+	std	r9,TI_LOCAL_FLAGS(r11)
+	ld	r10,_LINK(r1)		/* make idle task do the */
+	std	r10,_NIP(r1)		/* equivalent of a blr */
+	blr
+#endif
+
+/*
  * Hash table stuff
  */
 	.align	7
@@ -1452,20 +1452,36 @@ do_ste_alloc:
 _GLOBAL(do_stab_bolted)
 	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
 	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
+	mfspr	r11,SPRN_DAR			/* ea */
 
+	/*
+	 * check for bad kernel/user address
+	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+	 */
+	rldicr. r9,r11,4,(63 - 46 - 4)
+	li	r9,0	/* VSID = 0 for bad address */
+	bne-	0f
+
+	/*
+	 * Calculate VSID:
+	 * This is the kernel vsid, we take the top for context from
+	 * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+	 * Here we know that (ea >> 60) == 0xc
+	 */
+	lis	r9,(MAX_USER_CONTEXT + 1)@ha
+	addi	r9,r9,(MAX_USER_CONTEXT + 1)@l
+
+	srdi	r10,r11,SID_SHIFT
+	rldimi	r10,r9,ESID_BITS,0 /* proto vsid */
+	ASM_VSID_SCRAMBLE(r10, r9, 256M)
+	rldic	r9,r10,12,16	/* r9 = vsid << 12 */
+
+0:
 	/* Hash to the primary group */
 	ld	r10,PACASTABVIRT(r13)
-	mfspr	r11,SPRN_DAR
-	srdi	r11,r11,28
+	srdi	r11,r11,SID_SHIFT
 	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
 
-	/* Calculate VSID */
-	/* This is a kernel address, so protovsid = ESID | 1 << 37 */
-	li	r9,0x1
-	rldimi	r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
-	ASM_VSID_SCRAMBLE(r11, r9, 256M)
-	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
-
 	/* Search the primary group for a free entry */
 1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
 	andi.	r11,r11,0x80
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 7f7fb7fd991b..13f8d168b3f1 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2832,11 +2832,13 @@ static void unreloc_toc(void)
 {
 }
 #else
-static void __reloc_toc(void *tocstart, unsigned long offset,
-			unsigned long nr_entries)
+static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
 {
 	unsigned long i;
-	unsigned long *toc_entry = (unsigned long *)tocstart;
+	unsigned long *toc_entry;
+
+	/* Get the start of the TOC by using r2 directly. */
+	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
 
 	for (i = 0; i < nr_entries; i++) {
 		*toc_entry = *toc_entry + offset;
@@ -2850,8 +2852,7 @@ static void reloc_toc(void)
 	unsigned long nr_entries =
 		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
 
-	/* Need to add offset to get at __prom_init_toc_start */
-	__reloc_toc(__prom_init_toc_start + offset, offset, nr_entries);
+	__reloc_toc(offset, nr_entries);
 
 	mb();
 }
@@ -2864,8 +2865,7 @@ static void unreloc_toc(void)
 
 	mb();
 
-	/* __prom_init_toc_start has been relocated, no need to add offset */
-	__reloc_toc(__prom_init_toc_start, -offset, nr_entries);
+	__reloc_toc(-offset, nr_entries);
 }
 #endif
 #endif
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 245c1b6a0858..f9b30c68ba47 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1428,6 +1428,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
 
 	brk.address = bp_info->addr & ~7UL;
 	brk.type = HW_BRK_TYPE_TRANSLATE;
+	brk.len = 8;
 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
 		brk.type |= HW_BRK_TYPE_READ;
 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index ead58e317294..5d7d29a313eb 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 	vcpu3s->context_id[0] = err;
 
 	vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
-				  << USER_ESID_BITS) - 1;
-	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+				  << ESID_BITS) - 1;
+	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
 	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
 
 	kvmppc_mmu_hpte_init(vcpu);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1b6e1271719f..f410c3e12c1e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
 		unsigned long tprot = prot;
 
+		/*
+		 * If we hit a bad address return error.
+		 */
+		if (!vsid)
+			return -1;
 		/* Make kernel text executable */
 		if (overlaps_kernel_text(vaddr, vaddr + step))
 			tprot &= ~HPTE_R_N;
@@ -759,6 +764,8 @@ void __init early_init_mmu(void)
 	/* Initialize stab / SLB management */
 	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();
+	else
+		stab_initialize(get_paca()->stab_real);
 }
 
 #ifdef CONFIG_SMP
@@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
 		ea, access, trap);
 
-	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
-		DBG_LOW(" out of pgtable range !\n");
-		return 1;
-	}
-
 	/* Get region & vsid */
 	switch (REGION_ID(ea)) {
 	case USER_REGION_ID:
@@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	}
 	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
 
+	/* Bad address. */
+	if (!vsid) {
+		DBG_LOW("Bad address!\n");
+		return 1;
+	}
 	/* Get pgdir */
 	pgdir = mm->pgd;
 	if (pgdir == NULL)
@@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	/* Get VSID */
 	ssize = user_segment_size(ea);
 	vsid = get_vsid(mm->context.id, ea, ssize);
+	if (!vsid)
+		return;
 
 	/* Hash doesn't like irqs */
 	local_irq_save(flags);
@@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
 	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
+	/* Don't create HPTE entries for bad address */
+	if (!vsid)
+		return;
 	ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
 				 mode, HPTE_V_BOLTED,
 				 mmu_linear_psize, mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 40bc5b0ace54..d1d1b92c5b99 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -29,15 +29,6 @@
 static DEFINE_SPINLOCK(mmu_context_lock);
 static DEFINE_IDA(mmu_context_ida);
 
-/*
- * 256MB segment
- * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
- * available for user mappings. Each segment contains 2^28 bytes. Each
- * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
- * (19 == 37 + 28 - 46).
- */
-#define MAX_CONTEXT	((1UL << CONTEXT_BITS) - 1)
-
 int __init_new_context(void)
 {
 	int index;
@@ -56,7 +47,7 @@ again:
 	else if (err)
 		return err;
 
-	if (index > MAX_CONTEXT) {
+	if (index > MAX_USER_CONTEXT) {
 		spin_lock(&mmu_context_lock);
 		ida_remove(&mmu_context_ida, index);
 		spin_unlock(&mmu_context_lock);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e212a271c7a4..654258f165ae 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -61,7 +61,7 @@
 #endif
 
 #ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
 #error TASK_SIZE_USER64 exceeds user VSID range
 #endif
 #endif
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1a16ca227757..17aa6dfceb34 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -31,10 +31,15 @@
  * No other registers are examined or changed.
  */
 _GLOBAL(slb_allocate_realmode)
-	/* r3 = faulting address */
+	/*
+	 * check for bad kernel/user address
+	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+	 */
+	rldicr. r9,r3,4,(63 - 46 - 4)
+	bne-	8f
 
 	srdi	r9,r3,60		/* get region */
-	srdi	r10,r3,28		/* get esid */
+	srdi	r10,r3,SID_SHIFT	/* get esid */
 	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
 
 	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode)
  */
 _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
-	li	r9,0x1
 	/*
-	 * for 1T we shift 12 bits more.  slb_finish_load_1T will do
-	 * the necessary adjustment
+	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+	 * r9 = region id.
 	 */
-	rldimi	r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
+
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
 _GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
 6:
-	li	r9,0x1
 	/*
-	 * for 1T we shift 12 bits more.  slb_finish_load_1T will do
-	 * the necessary adjustment
+	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+	 * r9 = region id.
 	 */
-	rldimi	r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load_1T
 
-0:	/* user address: proto-VSID = context << 15 | ESID. First check
-	 * if the address is within the boundaries of the user region
-	 */
-	srdi.	r9,r10,USER_ESID_BITS
-	bne-	8f			/* invalid ea bits set */
-
-
+0:
 	/* when using slices, we extract the psize off the slice bitmaps
 	 * and then we need to get the sllp encoding off the mmu_psize_defs
 	 * array.
@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	ld	r9,PACACONTEXTID(r13)
 BEGIN_FTR_SECTION
 	cmpldi	r10,0x1000
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-	rldimi	r10,r9,USER_ESID_BITS,0
-BEGIN_FTR_SECTION
 	bge	slb_finish_load_1T
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load
 
 8:	/* invalid EA */
 	li	r10,0			/* BAD_VSID */
+	li	r9,0			/* BAD_VSID */
 	li	r11,SLB_VSID_USER	/* flags don't much matter */
 	b	slb_finish_load
 
@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user)
 
 	/* get context to calculate proto-VSID */
 	ld	r9,PACACONTEXTID(r13)
-	rldimi	r10,r9,USER_ESID_BITS,0
-
 	/* fall through slb_finish_load */
 
 #endif /* __DISABLED__ */
@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user)
 /*
  * Finish loading of an SLB entry and return
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+ * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
 slb_finish_load:
+	rldimi	r10,r9,ESID_BITS,0
 	ASM_VSID_SCRAMBLE(r10,r9,256M)
 	/*
 	 * bits above VSID_BITS_256M need to be ignored from r10
@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size)
 /*
  * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
+ * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
  */
 slb_finish_load_1T:
-	srdi	r10,r10,40-28		/* get 1T ESID */
+	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
+	rldimi	r10,r9,ESID_BITS_1T,0
 	ASM_VSID_SCRAMBLE(r10,r9,1T)
 	/*
 	 * bits above VSID_BITS_1T need to be ignored from r10
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 0d82ef50dc3f..023ec8a13f38 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	if (!is_kernel_addr(addr)) {
 		ssize = user_segment_size(addr);
 		vsid = get_vsid(mm->context.id, addr, ssize);
-		WARN_ON(vsid == 0);
 	} else {
 		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
 		ssize = mmu_kernel_ssize;
 	}
+	WARN_ON(vsid == 0);
 	vpn = hpt_vpn(addr, vsid, ssize);
 	rpte = __real_pte(__pte(pte), ptep);
 
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index b554879bd31e..3c475d6267c7 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -420,7 +420,20 @@ static struct attribute_group power7_pmu_events_group = {
420 .attrs = power7_events_attr, 420 .attrs = power7_events_attr,
421}; 421};
422 422
423PMU_FORMAT_ATTR(event, "config:0-19");
424
425static struct attribute *power7_pmu_format_attr[] = {
426 &format_attr_event.attr,
427 NULL,
428};
429
430struct attribute_group power7_pmu_format_group = {
431 .name = "format",
432 .attrs = power7_pmu_format_attr,
433};
434
423static const struct attribute_group *power7_pmu_attr_groups[] = { 435static const struct attribute_group *power7_pmu_attr_groups[] = {
436 &power7_pmu_format_group,
424 &power7_pmu_events_group, 437 &power7_pmu_events_group,
425 NULL, 438 NULL,
426}; 439};
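
The PMU_FORMAT_ATTR(event, "config:0-19") hunk publishes the raw event
layout as /sys/bus/event_source/devices/cpu/format/event, which is what
lets perf parse "cpu/event=0xNN/" syntax into the low 20 config bits.
Roughly what the macro expands to (paraphrased from the definition in
include/linux/perf_event.h; the real one also BUILD_BUG_ONs on the
string size):

    static ssize_t event_show(struct device *dev,
                              struct device_attribute *attr, char *page)
    {
            return sprintf(page, "config:0-19\n");
    }
    static struct device_attribute format_attr_event = __ATTR_RO(event);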
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 611e92f291c4..7179726ba5c5 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -69,7 +69,7 @@ static irqreturn_t gpio_halt_irq(int irq, void *__data)
69 return IRQ_HANDLED; 69 return IRQ_HANDLED;
70}; 70};
71 71
72static int __devinit gpio_halt_probe(struct platform_device *pdev) 72static int gpio_halt_probe(struct platform_device *pdev)
73{ 73{
74 enum of_gpio_flags flags; 74 enum of_gpio_flags flags;
75 struct device_node *node = pdev->dev.of_node; 75 struct device_node *node = pdev->dev.of_node;
@@ -128,7 +128,7 @@ static int __devinit gpio_halt_probe(struct platform_device *pdev)
128 return 0; 128 return 0;
129} 129}
130 130
131static int __devexit gpio_halt_remove(struct platform_device *pdev) 131static int gpio_halt_remove(struct platform_device *pdev)
132{ 132{
133 if (halt_node) { 133 if (halt_node) {
134 int gpio = of_get_gpio(halt_node, 0); 134 int gpio = of_get_gpio(halt_node, 0);
@@ -165,7 +165,7 @@ static struct platform_driver gpio_halt_driver = {
165 .of_match_table = gpio_halt_match, 165 .of_match_table = gpio_halt_match,
166 }, 166 },
167 .probe = gpio_halt_probe, 167 .probe = gpio_halt_probe,
168 .remove = __devexit_p(gpio_halt_remove), 168 .remove = gpio_halt_remove,
169}; 169};
170 170
171module_platform_driver(gpio_halt_driver); 171module_platform_driver(gpio_halt_driver);
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index cea2f09c4241..18e3b76c78d7 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -124,9 +124,8 @@ config 6xx
124 select PPC_HAVE_PMU_SUPPORT 124 select PPC_HAVE_PMU_SUPPORT
125 125
126config POWER3 126config POWER3
127 bool
128 depends on PPC64 && PPC_BOOK3S 127 depends on PPC64 && PPC_BOOK3S
129 default y if !POWER4_ONLY 128 def_bool y
130 129
131config POWER4 130config POWER4
132 depends on PPC64 && PPC_BOOK3S 131 depends on PPC64 && PPC_BOOK3S
@@ -145,8 +144,7 @@ config TUNE_CELL
145 but somewhat slower on other machines. This option only changes 144 but somewhat slower on other machines. This option only changes
146 the scheduling of instructions, not the selection of instructions 145 the scheduling of instructions, not the selection of instructions
147 itself, so the resulting kernel will keep running on all other 146 itself, so the resulting kernel will keep running on all other
148 machines. When building a kernel that is supposed to run only 147 machines.
149 on Cell, you should also select the POWER4_ONLY option.
150 148
151# this is temp to handle compat with arch=ppc 149# this is temp to handle compat with arch=ppc
152config 8xx 150config 8xx
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index 8d4847191ecc..dc9200ca32ed 100644
--- a/arch/s390/include/asm/eadm.h
+++ b/arch/s390/include/asm/eadm.h
@@ -34,6 +34,8 @@ struct arsb {
34 u32 reserved[4]; 34 u32 reserved[4];
35} __packed; 35} __packed;
36 36
37#define EQC_WR_PROHIBIT 22
38
37struct msb { 39struct msb {
38 u8 fmt:4; 40 u8 fmt:4;
39 u8 oc:4; 41 u8 oc:4;
@@ -96,11 +98,13 @@ struct scm_device {
96#define OP_STATE_TEMP_ERR 2 98#define OP_STATE_TEMP_ERR 2
97#define OP_STATE_PERM_ERR 3 99#define OP_STATE_PERM_ERR 3
98 100
101enum scm_event {SCM_CHANGE, SCM_AVAIL};
102
99struct scm_driver { 103struct scm_driver {
100 struct device_driver drv; 104 struct device_driver drv;
101 int (*probe) (struct scm_device *scmdev); 105 int (*probe) (struct scm_device *scmdev);
102 int (*remove) (struct scm_device *scmdev); 106 int (*remove) (struct scm_device *scmdev);
103 void (*notify) (struct scm_device *scmdev); 107 void (*notify) (struct scm_device *scmdev, enum scm_event event);
104 void (*handler) (struct scm_device *scmdev, void *data, int error); 108 void (*handler) (struct scm_device *scmdev, void *data, int error);
105}; 109};
106 110
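
With the eadm.h change, notify() now reports which event occurred instead
of leaving the driver to guess. A sketch of a driver-side handler against
the new signature (the handler body and names are illustrative):

    static void example_scm_notify(struct scm_device *scmdev,
                                   enum scm_event event)
    {
            switch (event) {
            case SCM_CHANGE:
                    /* state changed, e.g. writes now prohibited
                     * (cf. the new EQC_WR_PROHIBIT qualifier) */
                    break;
            case SCM_AVAIL:
                    /* device usable again */
                    break;
            }
    }

    static struct scm_driver example_scm_driver = {
            .drv    = { .name = "example_scm" },
            .notify = example_scm_notify,
    };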
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 1d8fe2b17ef6..6b32af30878c 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
74 74
75static inline void __tlb_flush_mm(struct mm_struct * mm) 75static inline void __tlb_flush_mm(struct mm_struct * mm)
76{ 76{
77 if (unlikely(cpumask_empty(mm_cpumask(mm))))
78 return;
79 /* 77 /*
80 * If the machine has IDTE we prefer to do a per mm flush 78 * If the machine has IDTE we prefer to do a per mm flush
81 * on all cpus instead of doing a local flush if the mm 79 * on all cpus instead of doing a local flush if the mm
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 550228523267..94feff7d6132 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -636,7 +636,8 @@ ENTRY(mcck_int_handler)
636 UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER 636 UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
637mcck_skip: 637mcck_skip:
638 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT 638 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
639 mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA 639 stm %r0,%r7,__PT_R0(%r11)
640 mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
640 stm %r8,%r9,__PT_PSW(%r11) 641 stm %r8,%r9,__PT_PSW(%r11)
641 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 642 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
642 l %r1,BASED(.Ldo_machine_check) 643 l %r1,BASED(.Ldo_machine_check)
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 9c837c101297..2e6d60c55f90 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -678,8 +678,9 @@ ENTRY(mcck_int_handler)
678 UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER 678 UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
679 LAST_BREAK %r14 679 LAST_BREAK %r14
680mcck_skip: 680mcck_skip:
681 lghi %r14,__LC_GPREGS_SAVE_AREA 681 lghi %r14,__LC_GPREGS_SAVE_AREA+64
682 mvc __PT_R0(128,%r11),0(%r14) 682 stmg %r0,%r7,__PT_R0(%r11)
683 mvc __PT_R8(64,%r11),0(%r14)
683 stmg %r8,%r9,__PT_PSW(%r11) 684 stmg %r8,%r9,__PT_PSW(%r11)
684 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 685 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
685 lgr %r2,%r11 # pass pointer to pt_regs 686 lgr %r2,%r11 # pass pointer to pt_regs
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a5360de85ec7..29268859d8ee 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -571,6 +571,8 @@ static void __init setup_memory_end(void)
571 571
572 /* Split remaining virtual space between 1:1 mapping & vmemmap array */ 572 /* Split remaining virtual space between 1:1 mapping & vmemmap array */
573 tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); 573 tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
574 /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
575 tmp = SECTION_ALIGN_UP(tmp);
574 tmp = VMALLOC_START - tmp * sizeof(struct page); 576 tmp = VMALLOC_START - tmp * sizeof(struct page);
575 tmp &= ~((vmax >> 11) - 1); /* align to page table level */ 577 tmp &= ~((vmax >> 11) - 1); /* align to page table level */
576 tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); 578 tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
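
The setup_memory_end() fix rounds the vmemmap-backed page count up to a
whole memory section before carving the space, because sparsemem assumes
struct pages exist for an entire section; without the alignment, a
partially covered final section could be dereferenced past the end of the
vmemmap. For reference, SECTION_ALIGN_UP() is defined in
include/linux/mmzone.h as:

    #define SECTION_ALIGN_UP(pfn) \
            (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)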
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 289127d5241c..3d361f236308 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -84,12 +84,6 @@ config ARCH_DEFCONFIG
84 default "arch/sparc/configs/sparc32_defconfig" if SPARC32 84 default "arch/sparc/configs/sparc32_defconfig" if SPARC32
85 default "arch/sparc/configs/sparc64_defconfig" if SPARC64 85 default "arch/sparc/configs/sparc64_defconfig" if SPARC64
86 86
87# CONFIG_BITS can be used at source level to get 32/64 bits
88config BITS
89 int
90 default 32 if SPARC32
91 default 64 if SPARC64
92
93config IOMMU_HELPER 87config IOMMU_HELPER
94 bool 88 bool
95 default y if SPARC64 89 default y if SPARC64
@@ -197,7 +191,7 @@ config RWSEM_XCHGADD_ALGORITHM
197 191
198config GENERIC_HWEIGHT 192config GENERIC_HWEIGHT
199 bool 193 bool
200 default y if !ULTRA_HAS_POPULATION_COUNT 194 default y
201 195
202config GENERIC_CALIBRATE_DELAY 196config GENERIC_CALIBRATE_DELAY
203 bool 197 bool
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index d06a26601753..6b67e50fb9b4 100644
--- a/arch/sparc/include/asm/spitfire.h
+++ b/arch/sparc/include/asm/spitfire.h
@@ -45,6 +45,7 @@
45#define SUN4V_CHIP_NIAGARA3 0x03 45#define SUN4V_CHIP_NIAGARA3 0x03
46#define SUN4V_CHIP_NIAGARA4 0x04 46#define SUN4V_CHIP_NIAGARA4 0x04
47#define SUN4V_CHIP_NIAGARA5 0x05 47#define SUN4V_CHIP_NIAGARA5 0x05
48#define SUN4V_CHIP_SPARC64X 0x8a
48#define SUN4V_CHIP_UNKNOWN 0xff 49#define SUN4V_CHIP_UNKNOWN 0xff
49 50
50#ifndef __ASSEMBLY__ 51#ifndef __ASSEMBLY__
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index a6c94a2bf9d4..5c5125895db8 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -493,6 +493,12 @@ static void __init sun4v_cpu_probe(void)
493 sparc_pmu_type = "niagara5"; 493 sparc_pmu_type = "niagara5";
494 break; 494 break;
495 495
496 case SUN4V_CHIP_SPARC64X:
497 sparc_cpu_type = "SPARC64-X";
498 sparc_fpu_type = "SPARC64-X integrated FPU";
499 sparc_pmu_type = "sparc64-x";
500 break;
501
496 default: 502 default:
497 printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", 503 printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
498 prom_cpu_compatible); 504 prom_cpu_compatible);
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 2feb15c35d9e..26b706a1867d 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -134,6 +134,8 @@ prom_niagara_prefix:
134 .asciz "SUNW,UltraSPARC-T" 134 .asciz "SUNW,UltraSPARC-T"
135prom_sparc_prefix: 135prom_sparc_prefix:
136 .asciz "SPARC-" 136 .asciz "SPARC-"
137prom_sparc64x_prefix:
138 .asciz "SPARC64-X"
137 .align 4 139 .align 4
138prom_root_compatible: 140prom_root_compatible:
139 .skip 64 141 .skip 64
@@ -412,7 +414,7 @@ sun4v_chip_type:
412 cmp %g2, 'T' 414 cmp %g2, 'T'
413 be,pt %xcc, 70f 415 be,pt %xcc, 70f
414 cmp %g2, 'M' 416 cmp %g2, 'M'
415 bne,pn %xcc, 4f 417 bne,pn %xcc, 49f
416 nop 418 nop
417 419
41870: ldub [%g1 + 7], %g2 42070: ldub [%g1 + 7], %g2
@@ -425,7 +427,7 @@ sun4v_chip_type:
425 cmp %g2, '5' 427 cmp %g2, '5'
426 be,pt %xcc, 5f 428 be,pt %xcc, 5f
427 mov SUN4V_CHIP_NIAGARA5, %g4 429 mov SUN4V_CHIP_NIAGARA5, %g4
428 ba,pt %xcc, 4f 430 ba,pt %xcc, 49f
429 nop 431 nop
430 432
43191: sethi %hi(prom_cpu_compatible), %g1 43391: sethi %hi(prom_cpu_compatible), %g1
@@ -439,6 +441,25 @@ sun4v_chip_type:
439 mov SUN4V_CHIP_NIAGARA2, %g4 441 mov SUN4V_CHIP_NIAGARA2, %g4
440 442
4414: 4434:
444 /* Athena */
445 sethi %hi(prom_cpu_compatible), %g1
446 or %g1, %lo(prom_cpu_compatible), %g1
447 sethi %hi(prom_sparc64x_prefix), %g7
448 or %g7, %lo(prom_sparc64x_prefix), %g7
449 mov 9, %g3
45041: ldub [%g7], %g2
451 ldub [%g1], %g4
452 cmp %g2, %g4
453 bne,pn %icc, 49f
454 add %g7, 1, %g7
455 subcc %g3, 1, %g3
456 bne,pt %xcc, 41b
457 add %g1, 1, %g1
458 mov SUN4V_CHIP_SPARC64X, %g4
459 ba,pt %xcc, 5f
460 nop
461
46249:
442 mov SUN4V_CHIP_UNKNOWN, %g4 463 mov SUN4V_CHIP_UNKNOWN, %g4
4435: sethi %hi(sun4v_chip_type), %g2 4645: sethi %hi(sun4v_chip_type), %g2
444 or %g2, %lo(sun4v_chip_type), %g2 465 or %g2, %lo(sun4v_chip_type), %g2
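
The new "Athena" block at label 4: is a byte-wise prefix compare of
prom_cpu_compatible against the 9-byte string "SPARC64-X" (hence
mov 9, %g3); the 4f -> 49f retargets send the existing mismatch paths to
the UNKNOWN fallback, which moved to the new 49: label. Functionally the
added loop is equivalent to this C sketch:

    /* what the 41: loop does, in C */
    if (!strncmp(prom_cpu_compatible, "SPARC64-X", 9))
            sun4v_chip_type = SUN4V_CHIP_SPARC64X;
    else
            sun4v_chip_type = SUN4V_CHIP_UNKNOWN;   /* 49: */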
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
index fc4320886a3a..4d1487138d26 100644
--- a/arch/sparc/kernel/leon_pci_grpci2.c
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -186,6 +186,8 @@ struct grpci2_cap_first {
186#define CAP9_IOMAP_OFS 0x20 186#define CAP9_IOMAP_OFS 0x20
187#define CAP9_BARSIZE_OFS 0x24 187#define CAP9_BARSIZE_OFS 0x24
188 188
189#define TGT 256
190
189struct grpci2_priv { 191struct grpci2_priv {
190 struct leon_pci_info info; /* must be on top of this structure */ 192 struct leon_pci_info info; /* must be on top of this structure */
191 struct grpci2_regs *regs; 193 struct grpci2_regs *regs;
@@ -237,8 +239,12 @@ static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
237 if (where & 0x3) 239 if (where & 0x3)
238 return -EINVAL; 240 return -EINVAL;
239 241
240 if (bus == 0 && PCI_SLOT(devfn) != 0) 242 if (bus == 0) {
241 devfn += (0x8 * 6); 243 devfn += (0x8 * 6); /* start at AD16=Device0 */
244 } else if (bus == TGT) {
245 bus = 0;
246 devfn = 0; /* special case: bridge controller itself */
247 }
242 248
243 /* Select bus */ 249 /* Select bus */
244 spin_lock_irqsave(&grpci2_dev_lock, flags); 250 spin_lock_irqsave(&grpci2_dev_lock, flags);
@@ -303,8 +309,12 @@ static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
303 if (where & 0x3) 309 if (where & 0x3)
304 return -EINVAL; 310 return -EINVAL;
305 311
306 if (bus == 0 && PCI_SLOT(devfn) != 0) 312 if (bus == 0) {
307 devfn += (0x8 * 6); 313 devfn += (0x8 * 6); /* start at AD16=Device0 */
314 } else if (bus == TGT) {
315 bus = 0;
316 devfn = 0; /* special case: bridge controller itself */
317 }
308 318
309 /* Select bus */ 319 /* Select bus */
310 spin_lock_irqsave(&grpci2_dev_lock, flags); 320 spin_lock_irqsave(&grpci2_dev_lock, flags);
@@ -368,7 +378,7 @@ static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn,
368 unsigned int busno = bus->number; 378 unsigned int busno = bus->number;
369 int ret; 379 int ret;
370 380
371 if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) { 381 if (PCI_SLOT(devfn) > 15 || busno > 255) {
372 *val = ~0; 382 *val = ~0;
373 return 0; 383 return 0;
374 } 384 }
@@ -406,7 +416,7 @@ static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn,
406 struct grpci2_priv *priv = grpci2priv; 416 struct grpci2_priv *priv = grpci2priv;
407 unsigned int busno = bus->number; 417 unsigned int busno = bus->number;
408 418
409 if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) 419 if (PCI_SLOT(devfn) > 15 || busno > 255)
410 return 0; 420 return 0;
411 421
412#ifdef GRPCI2_DEBUG_CFGACCESS 422#ifdef GRPCI2_DEBUG_CFGACCESS
@@ -578,15 +588,15 @@ void grpci2_hw_init(struct grpci2_priv *priv)
578 REGSTORE(regs->ahbmst_map[i], priv->pci_area); 588 REGSTORE(regs->ahbmst_map[i], priv->pci_area);
579 589
580 /* Get the GRPCI2 Host PCI ID */ 590 /* Get the GRPCI2 Host PCI ID */
581 grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid); 591 grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid);
582 592
583 /* Get address to first (always defined) capability structure */ 593 /* Get address to first (always defined) capability structure */
584 grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr); 594 grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr);
585 595
586 /* Enable/Disable Byte twisting */ 596 /* Enable/Disable Byte twisting */
587 grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map); 597 grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map);
588 io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); 598 io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0);
589 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map); 599 grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map);
590 600
591 /* Setup the Host's PCI Target BARs for other peripherals to access, 601 /* Setup the Host's PCI Target BARs for other peripherals to access,
592 * and do DMA to the host's memory. The target BARs can be sized and 602 * and do DMA to the host's memory. The target BARs can be sized and
@@ -617,17 +627,18 @@ void grpci2_hw_init(struct grpci2_priv *priv)
617 pciadr = 0; 627 pciadr = 0;
618 } 628 }
619 } 629 }
620 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz); 630 grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4,
621 grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); 631 bar_sz);
622 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); 632 grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
633 grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
623 printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", 634 printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n",
624 i, pciadr, ahbadr); 635 i, pciadr, ahbadr);
625 } 636 }
626 637
627 /* set as bus master and enable pci memory responses */ 638 /* set as bus master and enable pci memory responses */
628 grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data); 639 grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data);
629 data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); 640 data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
630 grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data); 641 grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data);
631 642
632 /* Enable Error respone (CPU-TRAP) on illegal memory access. */ 643 /* Enable Error respone (CPU-TRAP) on illegal memory access. */
633 REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE); 644 REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE);
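
The grpci2 change stops bus 0, slot 0 from aliasing the host bridge: all
bus-0 accesses now start at AD16 (Device0), and the bridge's own config
space is reached through the out-of-band bus number TGT. TGT is 256,
outside the valid 0-255 range, which is why the range checks become
"busno > 255". Callers inside the driver address the bridge like so:

    u32 id;

    /* bus == TGT is translated to bus 0, devfn 0: the bridge itself */
    grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &id);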
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index 8c5eff6d6df5..47684815e5c8 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
@@ -330,7 +330,6 @@ CONFIG_MD_RAID0=m
330CONFIG_MD_RAID1=m 330CONFIG_MD_RAID1=m
331CONFIG_MD_RAID10=m 331CONFIG_MD_RAID10=m
332CONFIG_MD_RAID456=m 332CONFIG_MD_RAID456=m
333CONFIG_MULTICORE_RAID456=y
334CONFIG_MD_FAULTY=m 333CONFIG_MD_FAULTY=m
335CONFIG_BLK_DEV_DM=m 334CONFIG_BLK_DEV_DM=m
336CONFIG_DM_DEBUG=y 335CONFIG_DM_DEBUG=y
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index e7a3dfcbcda7..dd2b8f0c631f 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
@@ -324,7 +324,6 @@ CONFIG_MD_RAID0=m
324CONFIG_MD_RAID1=m 324CONFIG_MD_RAID1=m
325CONFIG_MD_RAID10=m 325CONFIG_MD_RAID10=m
326CONFIG_MD_RAID456=m 326CONFIG_MD_RAID456=m
327CONFIG_MULTICORE_RAID456=y
328CONFIG_MD_FAULTY=m 327CONFIG_MD_FAULTY=m
329CONFIG_BLK_DEV_DM=m 328CONFIG_BLK_DEV_DM=m
330CONFIG_DM_DEBUG=y 329CONFIG_DM_DEBUG=y
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index d3ddd17405d0..5a6d2873f80e 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -77,6 +77,7 @@ struct arch_specific_insn {
77 * a post_handler or break_handler). 77 * a post_handler or break_handler).
78 */ 78 */
79 int boostable; 79 int boostable;
80 bool if_modifier;
80}; 81};
81 82
82struct arch_optimized_insn { 83struct arch_optimized_insn {
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 635a74d22409..4979778cc7fb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -414,8 +414,8 @@ struct kvm_vcpu_arch {
414 gpa_t time; 414 gpa_t time;
415 struct pvclock_vcpu_time_info hv_clock; 415 struct pvclock_vcpu_time_info hv_clock;
416 unsigned int hw_tsc_khz; 416 unsigned int hw_tsc_khz;
417 unsigned int time_offset; 417 struct gfn_to_hva_cache pv_time;
418 struct page *time_page; 418 bool pv_time_enabled;
419 /* set guest stopped flag in pvclock flags field */ 419 /* set guest stopped flag in pvclock flags field */
420 bool pvclock_set_guest_stopped_request; 420 bool pvclock_set_guest_stopped_request;
421 421
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index c20d1ce62dc6..e709884d0ef9 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
382 return _hypercall3(int, console_io, cmd, count, str); 382 return _hypercall3(int, console_io, cmd, count, str);
383} 383}
384 384
385extern int __must_check HYPERVISOR_physdev_op_compat(int, void *); 385extern int __must_check xen_physdev_op_compat(int, void *);
386 386
387static inline int 387static inline int
388HYPERVISOR_physdev_op(int cmd, void *arg) 388HYPERVISOR_physdev_op(int cmd, void *arg)
389{ 389{
390 int rc = _hypercall2(int, physdev_op, cmd, arg); 390 int rc = _hypercall2(int, physdev_op, cmd, arg);
391 if (unlikely(rc == -ENOSYS)) 391 if (unlikely(rc == -ENOSYS))
392 rc = HYPERVISOR_physdev_op_compat(cmd, arg); 392 rc = xen_physdev_op_compat(cmd, arg);
393 return rc; 393 return rc;
394} 394}
395 395
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 892ce40a7470..7a060f4b411f 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -44,6 +44,7 @@
44#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) 44#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) 45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
46 46
47#define MSR_PLATFORM_INFO 0x000000ce
47#define MSR_MTRRcap 0x000000fe 48#define MSR_MTRRcap 0x000000fe
48#define MSR_IA32_BBL_CR_CTL 0x00000119 49#define MSR_IA32_BBL_CR_CTL 0x00000119
49#define MSR_IA32_BBL_CR_CTL3 0x0000011e 50#define MSR_IA32_BBL_CR_CTL3 0x0000011e
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 529c8931fc02..dab7580c47ae 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -101,6 +101,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
101 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 101 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
102 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 102 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
103 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 103 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
104 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
105 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
106 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
107 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
104 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ 108 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
105 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 109 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
106 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 110 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
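
Decoding the added SNB constraints: INTEL_UEVENT_CONSTRAINT takes
(umask << 8) | event-select plus a bitmask of allowed general-purpose
counters, so the four new entries pin the CYCLE_ACTIVITY (0xa3) umasks to
specific counters. Illustrative decode of one entry:

    /* INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4) */
    unsigned int event = 0x02a3 & 0xff;         /* 0xa3 CYCLE_ACTIVITY */
    unsigned int umask = (0x02a3 >> 8) & 0xff;  /* 0x02 CYCLES_L1D_PENDING */
    unsigned int cntrs = 0x4;                   /* counter 2 only */

0xf allows counters 0-3; 0x4 restricts the two L1D-pending umasks to
counter 2.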
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 3f06e6149981..7bfe318d3d8a 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -375,6 +375,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
375 else 375 else
376 p->ainsn.boostable = -1; 376 p->ainsn.boostable = -1;
377 377
378 /* Check whether the instruction modifies Interrupt Flag or not */
379 p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
380
378 /* Also, displacement change doesn't affect the first byte */ 381 /* Also, displacement change doesn't affect the first byte */
379 p->opcode = p->ainsn.insn[0]; 382 p->opcode = p->ainsn.insn[0];
380} 383}
@@ -434,7 +437,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
434 __this_cpu_write(current_kprobe, p); 437 __this_cpu_write(current_kprobe, p);
435 kcb->kprobe_saved_flags = kcb->kprobe_old_flags 438 kcb->kprobe_saved_flags = kcb->kprobe_old_flags
436 = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); 439 = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
437 if (is_IF_modifier(p->ainsn.insn)) 440 if (p->ainsn.if_modifier)
438 kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; 441 kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
439} 442}
440 443
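
The kprobes change is a cache-the-decode optimization: is_IF_modifier()
re-decodes the copied instruction, and set_current_kprobe() runs on every
probe hit, so the result is computed once at arch_copy_kprobe() time and
stashed in the new if_modifier flag. The pattern, reduced to its shape:

    /* setup, once per registered probe */
    p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);

    /* hot path, once per probe hit: a flag test instead of a decode */
    if (p->ainsn.if_modifier)
            kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;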
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index 7890bc838952..d893e8ed8ac9 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
90 struct microcode_intel ***mc_saved; 90 struct microcode_intel ***mc_saved;
91 91
92 mc_saved = (struct microcode_intel ***) 92 mc_saved = (struct microcode_intel ***)
93 __pa_symbol(&mc_saved_data->mc_saved); 93 __pa_nodebug(&mc_saved_data->mc_saved);
94 for (i = 0; i < mc_saved_data->mc_saved_count; i++) { 94 for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
95 struct microcode_intel *p; 95 struct microcode_intel *p;
96 96
97 p = *(struct microcode_intel **) 97 p = *(struct microcode_intel **)
98 __pa(mc_saved_data->mc_saved + i); 98 __pa_nodebug(mc_saved_data->mc_saved + i);
99 mc_saved_tmp[i] = (struct microcode_intel *)__pa(p); 99 mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
100 } 100 }
101} 101}
102#endif 102#endif
@@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end,
562 struct cpio_data cd; 562 struct cpio_data cd;
563 long offset = 0; 563 long offset = 0;
564#ifdef CONFIG_X86_32 564#ifdef CONFIG_X86_32
565 char *p = (char *)__pa_symbol(ucode_name); 565 char *p = (char *)__pa_nodebug(ucode_name);
566#else 566#else
567 char *p = ucode_name; 567 char *p = ucode_name;
568#endif 568#endif
@@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
630 if (mc_intel == NULL) 630 if (mc_intel == NULL)
631 return; 631 return;
632 632
633 delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info); 633 delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
634 current_mc_date_p = (int *)__pa_symbol(&current_mc_date); 634 current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
635 635
636 *delay_ucode_info_p = 1; 636 *delay_ucode_info_p = 1;
637 *current_mc_date_p = mc_intel->hdr.date; 637 *current_mc_date_p = mc_intel->hdr.date;
@@ -659,8 +659,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
659} 659}
660#endif 660#endif
661 661
662static int apply_microcode_early(struct mc_saved_data *mc_saved_data, 662static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data,
663 struct ucode_cpu_info *uci) 663 struct ucode_cpu_info *uci)
664{ 664{
665 struct microcode_intel *mc_intel; 665 struct microcode_intel *mc_intel;
666 unsigned int val[2]; 666 unsigned int val[2];
@@ -741,15 +741,15 @@ load_ucode_intel_bsp(void)
741#ifdef CONFIG_X86_32 741#ifdef CONFIG_X86_32
742 struct boot_params *boot_params_p; 742 struct boot_params *boot_params_p;
743 743
744 boot_params_p = (struct boot_params *)__pa_symbol(&boot_params); 744 boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params);
745 ramdisk_image = boot_params_p->hdr.ramdisk_image; 745 ramdisk_image = boot_params_p->hdr.ramdisk_image;
746 ramdisk_size = boot_params_p->hdr.ramdisk_size; 746 ramdisk_size = boot_params_p->hdr.ramdisk_size;
747 initrd_start_early = ramdisk_image; 747 initrd_start_early = ramdisk_image;
748 initrd_end_early = initrd_start_early + ramdisk_size; 748 initrd_end_early = initrd_start_early + ramdisk_size;
749 749
750 _load_ucode_intel_bsp( 750 _load_ucode_intel_bsp(
751 (struct mc_saved_data *)__pa_symbol(&mc_saved_data), 751 (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
752 (unsigned long *)__pa_symbol(&mc_saved_in_initrd), 752 (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
753 initrd_start_early, initrd_end_early, &uci); 753 initrd_start_early, initrd_end_early, &uci);
754#else 754#else
755 ramdisk_image = boot_params.hdr.ramdisk_image; 755 ramdisk_image = boot_params.hdr.ramdisk_image;
@@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void)
772 unsigned long *initrd_start_p; 772 unsigned long *initrd_start_p;
773 773
774 mc_saved_in_initrd_p = 774 mc_saved_in_initrd_p =
775 (unsigned long *)__pa_symbol(mc_saved_in_initrd); 775 (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
776 mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data); 776 mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
777 initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start); 777 initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
778 initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p); 778 initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
779#else 779#else
780 mc_saved_data_p = &mc_saved_data; 780 mc_saved_data_p = &mc_saved_data;
781 mc_saved_in_initrd_p = mc_saved_in_initrd; 781 mc_saved_in_initrd_p = mc_saved_in_initrd;
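
The microcode_intel_early conversion swaps __pa()/__pa_symbol() for
__pa_nodebug() because this path runs very early in boot; with
CONFIG_DEBUG_VIRTUAL the regular macros route through an instrumented
out-of-line __phys_addr() that is not safe to call at this point, while
the _nodebug flavor stays plain inline arithmetic. The pair lives in
arch/x86/include/asm/page.h along these lines:

    #define __pa(x)         __phys_addr((unsigned long)(x))
    #define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x))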
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f71500af1f81..f19ac0aca60d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1406 unsigned long flags, this_tsc_khz; 1406 unsigned long flags, this_tsc_khz;
1407 struct kvm_vcpu_arch *vcpu = &v->arch; 1407 struct kvm_vcpu_arch *vcpu = &v->arch;
1408 struct kvm_arch *ka = &v->kvm->arch; 1408 struct kvm_arch *ka = &v->kvm->arch;
1409 void *shared_kaddr;
1410 s64 kernel_ns, max_kernel_ns; 1409 s64 kernel_ns, max_kernel_ns;
1411 u64 tsc_timestamp, host_tsc; 1410 u64 tsc_timestamp, host_tsc;
1412 struct pvclock_vcpu_time_info *guest_hv_clock; 1411 struct pvclock_vcpu_time_info guest_hv_clock;
1413 u8 pvclock_flags; 1412 u8 pvclock_flags;
1414 bool use_master_clock; 1413 bool use_master_clock;
1415 1414
1416 kernel_ns = 0; 1415 kernel_ns = 0;
1417 host_tsc = 0; 1416 host_tsc = 0;
1418 1417
1419 /* Keep irq disabled to prevent changes to the clock */
1420 local_irq_save(flags);
1421 this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
1422 if (unlikely(this_tsc_khz == 0)) {
1423 local_irq_restore(flags);
1424 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1425 return 1;
1426 }
1427
1428 /* 1418 /*
1429 * If the host uses TSC clock, then passthrough TSC as stable 1419 * If the host uses TSC clock, then passthrough TSC as stable
1430 * to the guest. 1420 * to the guest.
@@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1436 kernel_ns = ka->master_kernel_ns; 1426 kernel_ns = ka->master_kernel_ns;
1437 } 1427 }
1438 spin_unlock(&ka->pvclock_gtod_sync_lock); 1428 spin_unlock(&ka->pvclock_gtod_sync_lock);
1429
1430 /* Keep irq disabled to prevent changes to the clock */
1431 local_irq_save(flags);
1432 this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
1433 if (unlikely(this_tsc_khz == 0)) {
1434 local_irq_restore(flags);
1435 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1436 return 1;
1437 }
1439 if (!use_master_clock) { 1438 if (!use_master_clock) {
1440 host_tsc = native_read_tsc(); 1439 host_tsc = native_read_tsc();
1441 kernel_ns = get_kernel_ns(); 1440 kernel_ns = get_kernel_ns();
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1463 1462
1464 local_irq_restore(flags); 1463 local_irq_restore(flags);
1465 1464
1466 if (!vcpu->time_page) 1465 if (!vcpu->pv_time_enabled)
1467 return 0; 1466 return 0;
1468 1467
1469 /* 1468 /*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1525 */ 1524 */
1526 vcpu->hv_clock.version += 2; 1525 vcpu->hv_clock.version += 2;
1527 1526
1528 shared_kaddr = kmap_atomic(vcpu->time_page); 1527 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
1529 1528 &guest_hv_clock, sizeof(guest_hv_clock))))
1530 guest_hv_clock = shared_kaddr + vcpu->time_offset; 1529 return 0;
1531 1530
1532 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ 1531 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
1533 pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); 1532 pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
1534 1533
1535 if (vcpu->pvclock_set_guest_stopped_request) { 1534 if (vcpu->pvclock_set_guest_stopped_request) {
1536 pvclock_flags |= PVCLOCK_GUEST_STOPPED; 1535 pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1543 1542
1544 vcpu->hv_clock.flags = pvclock_flags; 1543 vcpu->hv_clock.flags = pvclock_flags;
1545 1544
1546 memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, 1545 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1547 sizeof(vcpu->hv_clock)); 1546 &vcpu->hv_clock,
1548 1547 sizeof(vcpu->hv_clock));
1549 kunmap_atomic(shared_kaddr);
1550
1551 mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
1552 return 0; 1548 return 0;
1553} 1549}
1554 1550
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
1837 1833
1838static void kvmclock_reset(struct kvm_vcpu *vcpu) 1834static void kvmclock_reset(struct kvm_vcpu *vcpu)
1839{ 1835{
1840 if (vcpu->arch.time_page) { 1836 vcpu->arch.pv_time_enabled = false;
1841 kvm_release_page_dirty(vcpu->arch.time_page);
1842 vcpu->arch.time_page = NULL;
1843 }
1844} 1837}
1845 1838
1846static void accumulate_steal_time(struct kvm_vcpu *vcpu) 1839static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1947 break; 1940 break;
1948 case MSR_KVM_SYSTEM_TIME_NEW: 1941 case MSR_KVM_SYSTEM_TIME_NEW:
1949 case MSR_KVM_SYSTEM_TIME: { 1942 case MSR_KVM_SYSTEM_TIME: {
1943 u64 gpa_offset;
1950 kvmclock_reset(vcpu); 1944 kvmclock_reset(vcpu);
1951 1945
1952 vcpu->arch.time = data; 1946 vcpu->arch.time = data;
@@ -1956,14 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1956 if (!(data & 1)) 1950 if (!(data & 1))
1957 break; 1951 break;
1958 1952
1959 /* ...but clean it before doing the actual write */ 1953 gpa_offset = data & ~(PAGE_MASK | 1);
1960 vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
1961 1954
1962 vcpu->arch.time_page = 1955 /* Check that the address is 32-byte aligned. */
1963 gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); 1956 if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
1957 break;
1964 1958
1965 if (is_error_page(vcpu->arch.time_page)) 1959 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
1966 vcpu->arch.time_page = NULL; 1960 &vcpu->arch.pv_time, data & ~1ULL))
1961 vcpu->arch.pv_time_enabled = false;
1962 else
1963 vcpu->arch.pv_time_enabled = true;
1967 1964
1968 break; 1965 break;
1969 } 1966 }
@@ -2967,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
2967 */ 2964 */
2968static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) 2965static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
2969{ 2966{
2970 if (!vcpu->arch.time_page) 2967 if (!vcpu->arch.pv_time_enabled)
2971 return -EINVAL; 2968 return -EINVAL;
2972 vcpu->arch.pvclock_set_guest_stopped_request = true; 2969 vcpu->arch.pvclock_set_guest_stopped_request = true;
2973 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 2970 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6718,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
6718 goto fail_free_wbinvd_dirty_mask; 6715 goto fail_free_wbinvd_dirty_mask;
6719 6716
6720 vcpu->arch.ia32_tsc_adjust_msr = 0x0; 6717 vcpu->arch.ia32_tsc_adjust_msr = 0x0;
6718 vcpu->arch.pv_time_enabled = false;
6721 kvm_async_pf_hash_reset(vcpu); 6719 kvm_async_pf_hash_reset(vcpu);
6722 kvm_pmu_init(vcpu); 6720 kvm_pmu_init(vcpu);
6723 6721
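
The kvmclock rework drops the pinned struct page (time_page) plus
kmap_atomic() in favor of a gfn_to_hva_cache, so no page reference is
held for the guest's lifetime and clock updates go through the
cached-translation helpers. A minimal usage sketch mirroring the calls
in the hunk (error handling elided; pv_time_enabled gates the update
path):

    struct gfn_to_hva_cache pv_time;
    struct pvclock_vcpu_time_info hv_clock;

    /* MSR_KVM_SYSTEM_TIME write: remember the guest's pvclock GPA */
    if (kvm_gfn_to_hva_cache_init(kvm, &pv_time, gpa & ~1ULL))
            return;         /* bad GPA, leave pv_time_enabled false */

    /* per clock update: read-modify-write through the cache */
    kvm_read_guest_cached(kvm, &pv_time, &hv_clock, sizeof(hv_clock));
    /* ... merge PVCLOCK flags, bump version ... */
    kvm_write_guest_cached(kvm, &pv_time, &hv_clock, sizeof(hv_clock));

The new alignment check works because sizeof(struct
pvclock_vcpu_time_info) is 32, so a 32-byte-aligned structure can never
straddle a page, which the single-page cache could not handle.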
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 05928aae911e..906fea315791 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
74 char c; 74 char c;
75 unsigned zero_len; 75 unsigned zero_len;
76 76
77 for (; len; --len) { 77 for (; len; --len, to++) {
78 if (__get_user_nocheck(c, from++, sizeof(char))) 78 if (__get_user_nocheck(c, from++, sizeof(char)))
79 break; 79 break;
80 if (__put_user_nocheck(c, to++, sizeof(char))) 80 if (__put_user_nocheck(c, to, sizeof(char)))
81 break; 81 break;
82 } 82 }
83 83
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index e8e34938c57d..6afbb2ca9a0a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1467,8 +1467,6 @@ static void __init xen_write_cr3_init(unsigned long cr3)
1467 __xen_write_cr3(true, cr3); 1467 __xen_write_cr3(true, cr3);
1468 1468
1469 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ 1469 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1470
1471 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1472} 1470}
1473#endif 1471#endif
1474 1472
@@ -2122,6 +2120,7 @@ static void __init xen_post_allocator_init(void)
2122#endif 2120#endif
2123 2121
2124#ifdef CONFIG_X86_64 2122#ifdef CONFIG_X86_64
2123 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2125 SetPagePinned(virt_to_page(level3_user_vsyscall)); 2124 SetPagePinned(virt_to_page(level3_user_vsyscall));
2126#endif 2125#endif
2127 xen_mark_init_mm_pinned(); 2126 xen_mark_init_mm_pinned();
diff --git a/block/blk-flush.c b/block/blk-flush.c
index db8f1b507857..cc2b827a853c 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -444,7 +444,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
444 * copied from blk_rq_pos(rq). 444 * copied from blk_rq_pos(rq).
445 */ 445 */
446 if (error_sector) 446 if (error_sector)
447 *error_sector = bio->bi_sector; 447 *error_sector = bio->bi_sector;
448 448
449 if (!bio_flagged(bio, BIO_UPTODATE)) 449 if (!bio_flagged(bio, BIO_UPTODATE))
450 ret = -EIO; 450 ret = -EIO;
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 789cdea05893..ae95ee6a58aa 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -257,6 +257,7 @@ void delete_partition(struct gendisk *disk, int partno)
257 257
258 hd_struct_put(part); 258 hd_struct_put(part);
259} 259}
260EXPORT_SYMBOL(delete_partition);
260 261
261static ssize_t whole_disk_show(struct device *dev, 262static ssize_t whole_disk_show(struct device *dev,
262 struct device_attribute *attr, char *buf) 263 struct device_attribute *attr, char *buf)
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index 1e5d8a40101e..fefc2ca7cc3e 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -405,7 +405,7 @@ int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
405 return rc; 405 return rc;
406 data_len = estatus->data_length; 406 data_len = estatus->data_length;
407 gdata = (struct acpi_hest_generic_data *)(estatus + 1); 407 gdata = (struct acpi_hest_generic_data *)(estatus + 1);
408 while (data_len > sizeof(*gdata)) { 408 while (data_len >= sizeof(*gdata)) {
409 gedata_len = gdata->error_data_length; 409 gedata_len = gdata->error_data_length;
410 if (gedata_len > data_len - sizeof(*gdata)) 410 if (gedata_len > data_len - sizeof(*gdata))
411 return -EINVAL; 411 return -EINVAL;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 0ac546d5e53f..5ff173066127 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -646,6 +646,7 @@ static void handle_root_bridge_insertion(acpi_handle handle)
646 646
647static void handle_root_bridge_removal(struct acpi_device *device) 647static void handle_root_bridge_removal(struct acpi_device *device)
648{ 648{
649 acpi_status status;
649 struct acpi_eject_event *ej_event; 650 struct acpi_eject_event *ej_event;
650 651
651 ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); 652 ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
@@ -661,7 +662,9 @@ static void handle_root_bridge_removal(struct acpi_device *device)
661 ej_event->device = device; 662 ej_event->device = device;
662 ej_event->event = ACPI_NOTIFY_EJECT_REQUEST; 663 ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
663 664
664 acpi_bus_hot_remove_device(ej_event); 665 status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
666 if (ACPI_FAILURE(status))
667 kfree(ej_event);
665} 668}
666 669
667static void _handle_hotplug_event_root(struct work_struct *work) 670static void _handle_hotplug_event_root(struct work_struct *work)
@@ -676,8 +679,9 @@ static void _handle_hotplug_event_root(struct work_struct *work)
676 handle = hp_work->handle; 679 handle = hp_work->handle;
677 type = hp_work->type; 680 type = hp_work->type;
678 681
679 root = acpi_pci_find_root(handle); 682 acpi_scan_lock_acquire();
680 683
684 root = acpi_pci_find_root(handle);
681 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); 685 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
682 686
683 switch (type) { 687 switch (type) {
@@ -711,6 +715,7 @@ static void _handle_hotplug_event_root(struct work_struct *work)
711 break; 715 break;
712 } 716 }
713 717
718 acpi_scan_lock_release();
714 kfree(hp_work); /* allocated in handle_hotplug_event_bridge */ 719 kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
715 kfree(buffer.pointer); 720 kfree(buffer.pointer);
716} 721}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 24213033fbae..9c1a435d10e6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -193,6 +193,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
193 }, 193 },
194 { 194 {
195 .callback = init_nvs_nosave, 195 .callback = init_nvs_nosave,
196 .ident = "Sony Vaio VGN-FW21M",
197 .matches = {
198 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
199 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
200 },
201 },
202 {
203 .callback = init_nvs_nosave,
196 .ident = "Sony Vaio VPCEB17FX", 204 .ident = "Sony Vaio VPCEB17FX",
197 .matches = { 205 .matches = {
198 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), 206 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index 093c43554963..1f44e56cc65d 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -158,7 +158,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
158EXPORT_SYMBOL(tegra_ahb_enable_smmu); 158EXPORT_SYMBOL(tegra_ahb_enable_smmu);
159#endif 159#endif
160 160
161#ifdef CONFIG_PM_SLEEP 161#ifdef CONFIG_PM
162static int tegra_ahb_suspend(struct device *dev) 162static int tegra_ahb_suspend(struct device *dev)
163{ 163{
164 int i; 164 int i;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 3e751b74615e..a5a3ebcbdd2c 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -59,15 +59,16 @@ config ATA_ACPI
59 option libata.noacpi=1 59 option libata.noacpi=1
60 60
61config SATA_ZPODD 61config SATA_ZPODD
62 bool "SATA Zero Power ODD Support" 62 bool "SATA Zero Power Optical Disc Drive (ZPODD) support"
63 depends on ATA_ACPI 63 depends on ATA_ACPI
64 default n 64 default n
65 help 65 help
66 This option adds support for SATA ZPODD. It requires both 66 This option adds support for SATA Zero Power Optical Disc
67 ODD and the platform support, and if enabled, will automatically 67 Drive (ZPODD). It requires both the ODD and the platform
68 power on/off the ODD when certain condition is satisfied. This 68 support, and if enabled, will automatically power on/off the
69 does not impact user's experience of the ODD, only power is saved 69 ODD when certain condition is satisfied. This does not impact
70 when ODD is not in use(i.e. no disc inside). 70 end user's experience of the ODD, only power is saved when
71 the ODD is not in use (i.e. no disc inside).
71 72
72 If unsure, say N. 73 If unsure, say N.
73 74
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index a99112cfd8b1..6a67b07de494 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -281,6 +281,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
281 { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */ 281 { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
282 { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */ 282 { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
283 { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */ 283 { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
284 { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
285 { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
284 { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ 286 { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
285 { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */ 287 { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
286 { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */ 288 { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d2ba439cfe54..ffdd32d22602 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1547,6 +1547,10 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev)
1547 1547
1548static int prefer_ms_hyperv = 1; 1548static int prefer_ms_hyperv = 1;
1549module_param(prefer_ms_hyperv, int, 0); 1549module_param(prefer_ms_hyperv, int, 0);
1550MODULE_PARM_DESC(prefer_ms_hyperv,
1551 "Prefer Hyper-V paravirtualization drivers instead of ATA, "
1552 "0 - Use ATA drivers, "
1553 "1 (Default) - Use the paravirtualization drivers.");
1550 1554
1551static void piix_ignore_devices_quirk(struct ata_host *host) 1555static void piix_ignore_devices_quirk(struct ata_host *host)
1552{ 1556{
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index beea3115577e..8a52dab412e2 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -1027,7 +1027,7 @@ static void ata_acpi_register_power_resource(struct ata_device *dev)
1027 1027
1028 handle = ata_dev_acpi_handle(dev); 1028 handle = ata_dev_acpi_handle(dev);
1029 if (handle) 1029 if (handle)
1030 acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev); 1030 acpi_dev_pm_add_dependent(handle, &sdev->sdev_gendev);
1031} 1031}
1032 1032
1033static void ata_acpi_unregister_power_resource(struct ata_device *dev) 1033static void ata_acpi_unregister_power_resource(struct ata_device *dev)
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 70b0e01372b3..6ef27e98c508 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -661,18 +661,7 @@ static struct platform_driver pata_s3c_driver = {
661 }, 661 },
662}; 662};
663 663
664static int __init pata_s3c_init(void) 664module_platform_driver_probe(pata_s3c_driver, pata_s3c_probe);
665{
666 return platform_driver_probe(&pata_s3c_driver, pata_s3c_probe);
667}
668
669static void __exit pata_s3c_exit(void)
670{
671 platform_driver_unregister(&pata_s3c_driver);
672}
673
674module_init(pata_s3c_init);
675module_exit(pata_s3c_exit);
676 665
677MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>"); 666MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>");
678MODULE_DESCRIPTION("low-level driver for Samsung PATA controller"); 667MODULE_DESCRIPTION("low-level driver for Samsung PATA controller");
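
module_platform_driver_probe(), new for 3.9, generates exactly the
init/exit boilerplate deleted here; it exists for drivers like this one
that must use platform_driver_probe() (probe code in __init, device not
hotpluggable). Approximately what the macro emits, paraphrasing
include/linux/platform_device.h:

    static int __init pata_s3c_driver_init(void)
    {
            return platform_driver_probe(&pata_s3c_driver, pata_s3c_probe);
    }
    module_init(pata_s3c_driver_init);

    static void __exit pata_s3c_driver_exit(void)
    {
            platform_driver_unregister(&pata_s3c_driver);
    }
    module_exit(pata_s3c_driver_exit);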
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 124b2c1d9c0b..608f82fed632 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1511,8 +1511,7 @@ error_exit_with_cleanup:
1511 1511
1512 if (hcr_base) 1512 if (hcr_base)
1513 iounmap(hcr_base); 1513 iounmap(hcr_base);
1514 if (host_priv) 1514 kfree(host_priv);
1515 kfree(host_priv);
1516 1515
1517 return retval; 1516 return retval;
1518} 1517}
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 6ee17bb391a9..b8bdfe61daa6 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -101,6 +101,8 @@ static inline int hypervisor_init(void) { return 0; }
101extern int platform_bus_init(void); 101extern int platform_bus_init(void);
102extern void cpu_dev_init(void); 102extern void cpu_dev_init(void);
103 103
104struct kobject *virtual_device_parent(struct device *dev);
105
104extern int bus_add_device(struct device *dev); 106extern int bus_add_device(struct device *dev);
105extern void bus_probe_device(struct device *dev); 107extern void bus_probe_device(struct device *dev);
106extern void bus_remove_device(struct device *dev); 108extern void bus_remove_device(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 519865b53f76..2ae2d2f92b6b 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1205,26 +1205,10 @@ static void system_root_device_release(struct device *dev)
1205{ 1205{
1206 kfree(dev); 1206 kfree(dev);
1207} 1207}
1208/** 1208
1209 * subsys_system_register - register a subsystem at /sys/devices/system/ 1209static int subsys_register(struct bus_type *subsys,
1210 * @subsys: system subsystem 1210 const struct attribute_group **groups,
1211 * @groups: default attributes for the root device 1211 struct kobject *parent_of_root)
1212 *
1213 * All 'system' subsystems have a /sys/devices/system/<name> root device
1214 * with the name of the subsystem. The root device can carry subsystem-
1215 * wide attributes. All registered devices are below this single root
1216 * device and are named after the subsystem with a simple enumeration
1217 * number appended. The registered devices are not explicitely named;
1218 * only 'id' in the device needs to be set.
1219 *
1220 * Do not use this interface for anything new, it exists for compatibility
1221 * with bad ideas only. New subsystems should use plain subsystems; and
1222 * add the subsystem-wide attributes should be added to the subsystem
1223 * directory itself and not some create fake root-device placed in
1224 * /sys/devices/system/<name>.
1225 */
1226int subsys_system_register(struct bus_type *subsys,
1227 const struct attribute_group **groups)
1228{ 1212{
1229 struct device *dev; 1213 struct device *dev;
1230 int err; 1214 int err;
@@ -1243,7 +1227,7 @@ int subsys_system_register(struct bus_type *subsys,
1243 if (err < 0) 1227 if (err < 0)
1244 goto err_name; 1228 goto err_name;
1245 1229
1246 dev->kobj.parent = &system_kset->kobj; 1230 dev->kobj.parent = parent_of_root;
1247 dev->groups = groups; 1231 dev->groups = groups;
1248 dev->release = system_root_device_release; 1232 dev->release = system_root_device_release;
1249 1233
@@ -1263,8 +1247,55 @@ err_dev:
1263 bus_unregister(subsys); 1247 bus_unregister(subsys);
1264 return err; 1248 return err;
1265} 1249}
1250
1251/**
1252 * subsys_system_register - register a subsystem at /sys/devices/system/
1253 * @subsys: system subsystem
1254 * @groups: default attributes for the root device
1255 *
1256 * All 'system' subsystems have a /sys/devices/system/<name> root device
1257 * with the name of the subsystem. The root device can carry subsystem-
1258 * wide attributes. All registered devices are below this single root
1259 * device and are named after the subsystem with a simple enumeration
1260 * number appended. The registered devices are not explicitely named;
1261 * only 'id' in the device needs to be set.
1262 *
1263 * Do not use this interface for anything new, it exists for compatibility
1264 * with bad ideas only. New subsystems should use plain subsystems; and
1265 * add the subsystem-wide attributes should be added to the subsystem
1266 * directory itself and not some create fake root-device placed in
1267 * /sys/devices/system/<name>.
1268 */
1269int subsys_system_register(struct bus_type *subsys,
1270 const struct attribute_group **groups)
1271{
1272 return subsys_register(subsys, groups, &system_kset->kobj);
1273}
1266EXPORT_SYMBOL_GPL(subsys_system_register); 1274EXPORT_SYMBOL_GPL(subsys_system_register);
1267 1275
1276/**
1277 * subsys_virtual_register - register a subsystem at /sys/devices/virtual/
1278 * @subsys: virtual subsystem
1279 * @groups: default attributes for the root device
1280 *
1281 * All 'virtual' subsystems have a /sys/devices/system/<name> root device
1282 * with the name of the subystem. The root device can carry subsystem-wide
1283 * attributes. All registered devices are below this single root device.
1284 * There's no restriction on device naming. This is for kernel software
1285 * constructs which need sysfs interface.
1286 */
1287int subsys_virtual_register(struct bus_type *subsys,
1288 const struct attribute_group **groups)
1289{
1290 struct kobject *virtual_dir;
1291
1292 virtual_dir = virtual_device_parent(NULL);
1293 if (!virtual_dir)
1294 return -ENOMEM;
1295
1296 return subsys_register(subsys, groups, virtual_dir);
1297}
1298
1268int __init buses_init(void) 1299int __init buses_init(void)
1269{ 1300{
1270 bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL); 1301 bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
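
subsys_virtual_register() refactors the old subsys_system_register() body
into the shared subsys_register() and adds a variant parented under
/sys/devices/virtual/ instead of /sys/devices/system/. It was added for
the workqueue sysfs interface headed into this merge window; a usage
sketch modeled on that caller (names abbreviated):

    static struct bus_type wq_subsys = {
            .name           = "workqueue",
            .dev_name       = "workqueue",
    };

    static int __init wq_sysfs_init(void)
    {
            /* root device appears as /sys/devices/virtual/workqueue */
            return subsys_virtual_register(&wq_subsys, NULL);
    }
    core_initcall(wq_sysfs_init);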
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 56536f4b0f6b..f58084a86e8c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -690,7 +690,7 @@ void device_initialize(struct device *dev)
690 set_dev_node(dev, -1); 690 set_dev_node(dev, -1);
691} 691}
692 692
693static struct kobject *virtual_device_parent(struct device *dev) 693struct kobject *virtual_device_parent(struct device *dev)
694{ 694{
695 static struct kobject *virtual_dir = NULL; 695 static struct kobject *virtual_dir = NULL;
696 696
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 5dc0daed8fac..b81ddfea1da0 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -532,11 +532,11 @@ config BLK_DEV_RBD
532 If unsure, say N. 532 If unsure, say N.
533 533
534config BLK_DEV_RSXX 534config BLK_DEV_RSXX
535 tristate "RamSam PCIe Flash SSD Device Driver" 535 tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver"
536 depends on PCI 536 depends on PCI
537 help 537 help
538 Device driver for IBM's high speed PCIe SSD 538 Device driver for IBM's high speed PCIe SSD
539 storage devices: RamSan-70 and RamSan-80. 539 storage devices: FlashSystem-70 and FlashSystem-80.
540 540
541 To compile this driver as a module, choose M here: the 541 To compile this driver as a module, choose M here: the
542 module will be called rsxx. 542 module will be called rsxx.
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index ade58bc8f3c4..1c1b8e544aa2 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -4206,7 +4206,7 @@ static int cciss_find_cfgtables(ctlr_info_t *h)
4206 if (rc) 4206 if (rc)
4207 return rc; 4207 return rc;
4208 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, 4208 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
4209 cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable)); 4209 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
4210 if (!h->cfgtable) 4210 if (!h->cfgtable)
4211 return -ENOMEM; 4211 return -ENOMEM;
4212 rc = write_driver_ver_to_cfgtable(h->cfgtable); 4212 rc = write_driver_ver_to_cfgtable(h->cfgtable);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 747bb2af69dc..fe5f6403417f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1044,12 +1044,29 @@ static int loop_clr_fd(struct loop_device *lo)
1044 lo->lo_state = Lo_unbound; 1044 lo->lo_state = Lo_unbound;
1045 /* This is safe: open() is still holding a reference. */ 1045 /* This is safe: open() is still holding a reference. */
1046 module_put(THIS_MODULE); 1046 module_put(THIS_MODULE);
1047 if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
1048 ioctl_by_bdev(bdev, BLKRRPART, 0);
1049 lo->lo_flags = 0; 1047 lo->lo_flags = 0;
1050 if (!part_shift) 1048 if (!part_shift)
1051 lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; 1049 lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
1052 mutex_unlock(&lo->lo_ctl_mutex); 1050 mutex_unlock(&lo->lo_ctl_mutex);
1051
1052 /*
1053 * Remove all partitions, since BLKRRPART won't remove user
1054 * added partitions when max_part=0
1055 */
1056 if (bdev) {
1057 struct disk_part_iter piter;
1058 struct hd_struct *part;
1059
1060 mutex_lock_nested(&bdev->bd_mutex, 1);
1061 invalidate_partition(bdev->bd_disk, 0);
1062 disk_part_iter_init(&piter, bdev->bd_disk,
1063 DISK_PITER_INCL_EMPTY);
1064 while ((part = disk_part_iter_next(&piter)))
1065 delete_partition(bdev->bd_disk, part->partno);
1066 disk_part_iter_exit(&piter);
1067 mutex_unlock(&bdev->bd_mutex);
1068 }
1069
1053 /* 1070 /*
1054 * Need not hold lo_ctl_mutex to fput backing file. 1071 * Need not hold lo_ctl_mutex to fput backing file.
1055 * Calling fput holding lo_ctl_mutex triggers a circular 1072 * Calling fput holding lo_ctl_mutex triggers a circular
@@ -1623,6 +1640,7 @@ static int loop_add(struct loop_device **l, int i)
1623 goto out_free_dev; 1640 goto out_free_dev;
1624 i = err; 1641 i = err;
1625 1642
1643 err = -ENOMEM;
1626 lo->lo_queue = blk_alloc_queue(GFP_KERNEL); 1644 lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
1627 if (!lo->lo_queue) 1645 if (!lo->lo_queue)
1628 goto out_free_dev; 1646 goto out_free_dev;
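
The loop_clr_fd() change exists because BLKRRPART cannot remove
user-added partitions when max_part=0 (the disk carries
GENHD_FL_NO_PART_SCAN), so the partitions are torn down explicitly; this
is also why delete_partition() gains an EXPORT_SYMBOL in
block/partition-generic.c above. The teardown pattern in isolation:

    struct disk_part_iter piter;
    struct hd_struct *part;

    mutex_lock_nested(&bdev->bd_mutex, 1);
    disk_part_iter_init(&piter, bdev->bd_disk, DISK_PITER_INCL_EMPTY);
    while ((part = disk_part_iter_next(&piter)))
            delete_partition(bdev->bd_disk, part->partno);
    disk_part_iter_exit(&piter);
    mutex_unlock(&bdev->bd_mutex);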
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 1788f491e0fb..076ae7f1b781 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -890,8 +890,10 @@ static int mg_probe(struct platform_device *plat_dev)
890 gpio_direction_output(host->rst, 1); 890 gpio_direction_output(host->rst, 1);
891 891
892 /* reset out pin */ 892 /* reset out pin */
893 if (!(prv_data->dev_attr & MG_DEV_MASK)) 893 if (!(prv_data->dev_attr & MG_DEV_MASK)) {
894 err = -EINVAL;
894 goto probe_err_3a; 895 goto probe_err_3a;
896 }
895 897
896 if (prv_data->dev_attr != MG_BOOT_DEV) { 898 if (prv_data->dev_attr != MG_BOOT_DEV) {
897 rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO, 899 rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
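
This hunk, the mtip32xx one below, and the loop_add() change above all fix the same bug class: jumping to the cleanup label while the function's return variable still holds 0 (or a stale success value), so a failed probe is silently reported as success. The shape of the fix, as a small self-contained sketch:

#include <errno.h>
#include <stdlib.h>

/* Hypothetical two-step setup using goto-based unwinding. */
static int setup(void **a, void **b)
{
        int err;

        *a = malloc(64);
        if (!*a)
                return -ENOMEM;

        *b = malloc(64);
        if (!*b) {
                err = -ENOMEM;          /* set the code *before* jumping */
                goto free_a;
        }
        return 0;

free_a:
        free(*a);
        *a = NULL;
        return err;
}
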
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 11cc9522cdd4..92250af84e7d 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4224,6 +4224,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
4224 dd->isr_workq = create_workqueue(dd->workq_name); 4224 dd->isr_workq = create_workqueue(dd->workq_name);
4225 if (!dd->isr_workq) { 4225 if (!dd->isr_workq) {
4226 dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance); 4226 dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
4227 rv = -ENOMEM;
4227 goto block_initialize_err; 4228 goto block_initialize_err;
4228 } 4229 }
4229 4230
@@ -4282,7 +4283,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
4282 INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7); 4283 INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
4283 4284
4284 pci_set_master(pdev); 4285 pci_set_master(pdev);
4285 if (pci_enable_msi(pdev)) { 4286 rv = pci_enable_msi(pdev);
4287 if (rv) {
4286 dev_warn(&pdev->dev, 4288 dev_warn(&pdev->dev,
4287 "Unable to enable MSI interrupt.\n"); 4289 "Unable to enable MSI interrupt.\n");
4288 goto block_initialize_err; 4290 goto block_initialize_err;
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 07fb2dfaae13..9dcefe40380b 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -135,6 +135,7 @@ static inline void _nvme_check_size(void)
135 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); 135 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
136 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); 136 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
137 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); 137 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
138 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
138} 139}
139 140
140typedef void (*nvme_completion_fn)(struct nvme_dev *, void *, 141typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
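
BUILD_BUG_ON() turns a layout assumption into a compile-time failure; the new line pins struct nvme_smart_log at the 512 bytes the NVMe spec defines for the SMART log page. The same trick in portable C11, with a hypothetical layout:

#include <assert.h>
#include <stdint.h>

struct smart_log_sketch {               /* hypothetical stand-in layout */
        uint8_t critical_warning;
        uint8_t temperature[2];
        uint8_t rsvd[509];
};

/* Compilation fails if padding or later edits ever change the size. */
static_assert(sizeof(struct smart_log_sketch) == 512,
              "smart log page must be exactly 512 bytes");
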
@@ -237,7 +238,8 @@ static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
237 *fn = special_completion; 238 *fn = special_completion;
238 return CMD_CTX_INVALID; 239 return CMD_CTX_INVALID;
239 } 240 }
240 *fn = info[cmdid].fn; 241 if (fn)
242 *fn = info[cmdid].fn;
241 ctx = info[cmdid].ctx; 243 ctx = info[cmdid].ctx;
242 info[cmdid].fn = special_completion; 244 info[cmdid].fn = special_completion;
243 info[cmdid].ctx = CMD_CTX_COMPLETED; 245 info[cmdid].ctx = CMD_CTX_COMPLETED;
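
free_cmdid() now tolerates a NULL out-parameter, which is what allows the new error path later in this file to call free_cmdid(nvmeq, cmdid, NULL) when the caller has no use for the completion function. The optional-out-parameter idiom, sketched:

#include <stddef.h>

/* Returns the main value; fills the auxiliary result only if asked for. */
static int lookup(int key, int *aux)
{
        int value = key * 2;            /* stand-in for the real work */

        if (aux)                        /* out-parameter is optional */
                *aux = key + 1;
        return value;
}
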
@@ -335,6 +337,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
335 iod->offset = offsetof(struct nvme_iod, sg[nseg]); 337 iod->offset = offsetof(struct nvme_iod, sg[nseg]);
336 iod->npages = -1; 338 iod->npages = -1;
337 iod->length = nbytes; 339 iod->length = nbytes;
340 iod->nents = 0;
338 } 341 }
339 342
340 return iod; 343 return iod;
@@ -375,7 +378,8 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
375 struct bio *bio = iod->private; 378 struct bio *bio = iod->private;
376 u16 status = le16_to_cpup(&cqe->status) >> 1; 379 u16 status = le16_to_cpup(&cqe->status) >> 1;
377 380
378 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, 381 if (iod->nents)
382 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
379 bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 383 bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
380 nvme_free_iod(dev, iod); 384 nvme_free_iod(dev, iod);
381 if (status) { 385 if (status) {
@@ -589,7 +593,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
589 593
590 result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs); 594 result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
591 if (result < 0) 595 if (result < 0)
592 goto free_iod; 596 goto free_cmdid;
593 length = result; 597 length = result;
594 598
595 cmnd->rw.command_id = cmdid; 599 cmnd->rw.command_id = cmdid;
@@ -609,6 +613,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
609 613
610 return 0; 614 return 0;
611 615
616 free_cmdid:
617 free_cmdid(nvmeq, cmdid, NULL);
612 free_iod: 618 free_iod:
613 nvme_free_iod(nvmeq->dev, iod); 619 nvme_free_iod(nvmeq->dev, iod);
614 nomem: 620 nomem:
@@ -835,8 +841,8 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
835 return nvme_submit_admin_cmd(dev, &c, NULL); 841 return nvme_submit_admin_cmd(dev, &c, NULL);
836} 842}
837 843
838static int nvme_get_features(struct nvme_dev *dev, unsigned fid, 844static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
839 unsigned nsid, dma_addr_t dma_addr) 845 dma_addr_t dma_addr, u32 *result)
840{ 846{
841 struct nvme_command c; 847 struct nvme_command c;
842 848
@@ -846,7 +852,7 @@ static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
846 c.features.prp1 = cpu_to_le64(dma_addr); 852 c.features.prp1 = cpu_to_le64(dma_addr);
847 c.features.fid = cpu_to_le32(fid); 853 c.features.fid = cpu_to_le32(fid);
848 854
849 return nvme_submit_admin_cmd(dev, &c, NULL); 855 return nvme_submit_admin_cmd(dev, &c, result);
850} 856}
851 857
852static int nvme_set_features(struct nvme_dev *dev, unsigned fid, 858static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
@@ -906,6 +912,10 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
906 912
907 spin_lock_irq(&nvmeq->q_lock); 913 spin_lock_irq(&nvmeq->q_lock);
908 nvme_cancel_ios(nvmeq, false); 914 nvme_cancel_ios(nvmeq, false);
915 while (bio_list_peek(&nvmeq->sq_cong)) {
916 struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
917 bio_endio(bio, -EIO);
918 }
909 spin_unlock_irq(&nvmeq->q_lock); 919 spin_unlock_irq(&nvmeq->q_lock);
910 920
911 irq_set_affinity_hint(vector, NULL); 921 irq_set_affinity_hint(vector, NULL);
@@ -1230,12 +1240,17 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
1230 if (length != cmd.data_len) 1240 if (length != cmd.data_len)
1231 status = -ENOMEM; 1241 status = -ENOMEM;
1232 else 1242 else
1233 status = nvme_submit_admin_cmd(dev, &c, NULL); 1243 status = nvme_submit_admin_cmd(dev, &c, &cmd.result);
1234 1244
1235 if (cmd.data_len) { 1245 if (cmd.data_len) {
1236 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); 1246 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
1237 nvme_free_iod(dev, iod); 1247 nvme_free_iod(dev, iod);
1238 } 1248 }
1249
1250 if (!status && copy_to_user(&ucmd->result, &cmd.result,
1251 sizeof(cmd.result)))
1252 status = -EFAULT;
1253
1239 return status; 1254 return status;
1240} 1255}
1241 1256
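
The admin-command ioctl now threads the controller's 32-bit completion result back to the caller: nvme_submit_admin_cmd() fills cmd.result, and on success it is copied into the user's structure, with a failed copy downgrading the status to -EFAULT. A sketch of that copy-back tail (struct ucmd_sketch and its result field are hypothetical):

/* Sketch: end of an ioctl handler that returns a device result. */
struct ucmd_sketch {
        __u32 result;
        /* ... */
};

static int finish_ioctl(struct ucmd_sketch __user *ucmd, __u32 result,
                        int status)
{
        if (!status && copy_to_user(&ucmd->result, &result, sizeof(result)))
                status = -EFAULT;       /* command worked; answer was lost */
        return status;
}
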
@@ -1523,9 +1538,9 @@ static int nvme_dev_add(struct nvme_dev *dev)
1523 continue; 1538 continue;
1524 1539
1525 res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, 1540 res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
1526 dma_addr + 4096); 1541 dma_addr + 4096, NULL);
1527 if (res) 1542 if (res)
1528 continue; 1543 memset(mem + 4096, 0, 4096);
1529 1544
1530 ns = nvme_alloc_ns(dev, i, mem, mem + 4096); 1545 ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
1531 if (ns) 1546 if (ns)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 11e179826b60..6b2b039c191f 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1264,6 +1264,32 @@ static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1264 return atomic_read(&obj_request->done) != 0; 1264 return atomic_read(&obj_request->done) != 0;
1265} 1265}
1266 1266
1267static void
1268rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1269{
1270 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1271 obj_request, obj_request->img_request, obj_request->result,
1272 obj_request->xferred, obj_request->length);
1273 /*
1274 * ENOENT means a hole in the image. We zero-fill the
1275 * entire length of the request. A short read also implies
1276 * zero-fill to the end of the request. Either way we
1277 * update the xferred count to indicate the whole request
1278 * was satisfied.
1279 */
1280 BUG_ON(obj_request->type != OBJ_REQUEST_BIO);
1281 if (obj_request->result == -ENOENT) {
1282 zero_bio_chain(obj_request->bio_list, 0);
1283 obj_request->result = 0;
1284 obj_request->xferred = obj_request->length;
1285 } else if (obj_request->xferred < obj_request->length &&
1286 !obj_request->result) {
1287 zero_bio_chain(obj_request->bio_list, obj_request->xferred);
1288 obj_request->xferred = obj_request->length;
1289 }
1290 obj_request_done_set(obj_request);
1291}
1292
1267static void rbd_obj_request_complete(struct rbd_obj_request *obj_request) 1293static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1268{ 1294{
1269 dout("%s: obj %p cb %p\n", __func__, obj_request, 1295 dout("%s: obj %p cb %p\n", __func__, obj_request,
@@ -1284,23 +1310,10 @@ static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
1284{ 1310{
1285 dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request, 1311 dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
1286 obj_request->result, obj_request->xferred, obj_request->length); 1312 obj_request->result, obj_request->xferred, obj_request->length);
1287 /* 1313 if (obj_request->img_request)
1288 * ENOENT means a hole in the object. We zero-fill the 1314 rbd_img_obj_request_read_callback(obj_request);
1289 * entire length of the request. A short read also implies 1315 else
1290 * zero-fill to the end of the request. Either way we 1316 obj_request_done_set(obj_request);
1291 * update the xferred count to indicate the whole request
1292 * was satisfied.
1293 */
1294 if (obj_request->result == -ENOENT) {
1295 zero_bio_chain(obj_request->bio_list, 0);
1296 obj_request->result = 0;
1297 obj_request->xferred = obj_request->length;
1298 } else if (obj_request->xferred < obj_request->length &&
1299 !obj_request->result) {
1300 zero_bio_chain(obj_request->bio_list, obj_request->xferred);
1301 obj_request->xferred = obj_request->length;
1302 }
1303 obj_request_done_set(obj_request);
1304} 1317}
1305 1318
1306static void rbd_osd_write_callback(struct rbd_obj_request *obj_request) 1319static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
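
The rbd change is a pure refactor: the hole-handling policy (ENOENT means the backing object does not exist, so the read is satisfied with zeroes; a short read is zero-filled to the end, and either way xferred is bumped to the full length) moves into rbd_img_obj_request_read_callback(), so it only applies to object requests that belong to an image request. The policy itself, reduced to a sketch:

/* Sketch of the read-completion policy the new helper implements. */
static void finish_read(int *result, unsigned long long *xferred,
                        unsigned long long length)
{
        if (*result == -ENOENT) {       /* hole: request reads as zeroes */
                /* zero_bio_chain(bio, 0); */
                *result = 0;
                *xferred = length;
        } else if (!*result && *xferred < length) {     /* short read */
                /* zero_bio_chain(bio, *xferred); */
                *xferred = length;
        }
}
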
diff --git a/drivers/block/rsxx/Makefile b/drivers/block/rsxx/Makefile
index f35cd0b71f7b..b1c53c0aa450 100644
--- a/drivers/block/rsxx/Makefile
+++ b/drivers/block/rsxx/Makefile
@@ -1,2 +1,2 @@
1obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o 1obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
2rsxx-y := config.o core.o cregs.o dev.o dma.o 2rsxx-objs := config.o core.o cregs.o dev.o dma.o
diff --git a/drivers/block/rsxx/config.c b/drivers/block/rsxx/config.c
index a295e7e9ee41..10cd530d3e10 100644
--- a/drivers/block/rsxx/config.c
+++ b/drivers/block/rsxx/config.c
@@ -29,15 +29,13 @@
29#include "rsxx_priv.h" 29#include "rsxx_priv.h"
30#include "rsxx_cfg.h" 30#include "rsxx_cfg.h"
31 31
32static void initialize_config(void *config) 32static void initialize_config(struct rsxx_card_cfg *cfg)
33{ 33{
34 struct rsxx_card_cfg *cfg = config;
35
36 cfg->hdr.version = RSXX_CFG_VERSION; 34 cfg->hdr.version = RSXX_CFG_VERSION;
37 35
38 cfg->data.block_size = RSXX_HW_BLK_SIZE; 36 cfg->data.block_size = RSXX_HW_BLK_SIZE;
39 cfg->data.stripe_size = RSXX_HW_BLK_SIZE; 37 cfg->data.stripe_size = RSXX_HW_BLK_SIZE;
40 cfg->data.vendor_id = RSXX_VENDOR_ID_TMS_IBM; 38 cfg->data.vendor_id = RSXX_VENDOR_ID_IBM;
41 cfg->data.cache_order = (-1); 39 cfg->data.cache_order = (-1);
42 cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED; 40 cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED;
43 cfg->data.intr_coal.count = 0; 41 cfg->data.intr_coal.count = 0;
@@ -181,7 +179,7 @@ int rsxx_load_config(struct rsxx_cardinfo *card)
181 } else { 179 } else {
182 dev_info(CARD_TO_DEV(card), 180 dev_info(CARD_TO_DEV(card),
183 "Initializing card configuration.\n"); 181 "Initializing card configuration.\n");
184 initialize_config(card); 182 initialize_config(&card->config);
185 st = rsxx_save_config(card); 183 st = rsxx_save_config(card);
186 if (st) 184 if (st)
187 return st; 185 return st;
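
Typing the initialize_config() parameter does more than drop a cast: the old void * signature is what let the call site pass the card pointer itself (initialize_config(card)) without a peep from the compiler; with struct rsxx_card_cfg * in the prototype, the corrected call &card->config is the only one that builds. A compact illustration with hypothetical types:

struct card_cfg { int block_size; };
struct card     { int state; struct card_cfg config; };

static void init_cfg_untyped(void *config)      /* accepts anything... */
{
        struct card_cfg *cfg = config;          /* ...even the wrong thing */

        cfg->block_size = 4096;
}

static void init_cfg(struct card_cfg *cfg)      /* typed: misuse won't build */
{
        cfg->block_size = 4096;
}

static void probe(struct card *card)
{
        init_cfg_untyped(card);         /* wrong object, compiles anyway */
        init_cfg(&card->config);        /* init_cfg(card) would be rejected */
}
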
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index e5162487686a..5af21f2db29c 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -30,6 +30,7 @@
30#include <linux/reboot.h> 30#include <linux/reboot.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/bitops.h> 32#include <linux/bitops.h>
33#include <linux/delay.h>
33 34
34#include <linux/genhd.h> 35#include <linux/genhd.h>
35#include <linux/idr.h> 36#include <linux/idr.h>
@@ -39,8 +40,8 @@
39 40
40#define NO_LEGACY 0 41#define NO_LEGACY 0
41 42
42MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver"); 43MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver");
43MODULE_AUTHOR("IBM <support@ramsan.com>"); 44MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
44MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
45MODULE_VERSION(DRIVER_VERSION); 46MODULE_VERSION(DRIVER_VERSION);
46 47
@@ -52,6 +53,13 @@ static DEFINE_IDA(rsxx_disk_ida);
52static DEFINE_SPINLOCK(rsxx_ida_lock); 53static DEFINE_SPINLOCK(rsxx_ida_lock);
53 54
54/*----------------- Interrupt Control & Handling -------------------*/ 55/*----------------- Interrupt Control & Handling -------------------*/
56
57static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
58{
59 card->isr_mask = 0;
60 card->ier_mask = 0;
61}
62
55static void __enable_intr(unsigned int *mask, unsigned int intr) 63static void __enable_intr(unsigned int *mask, unsigned int intr)
56{ 64{
57 *mask |= intr; 65 *mask |= intr;
@@ -71,7 +79,8 @@ static void __disable_intr(unsigned int *mask, unsigned int intr)
71 */ 79 */
72void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr) 80void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
73{ 81{
74 if (unlikely(card->halt)) 82 if (unlikely(card->halt) ||
83 unlikely(card->eeh_state))
75 return; 84 return;
76 85
77 __enable_intr(&card->ier_mask, intr); 86 __enable_intr(&card->ier_mask, intr);
@@ -80,6 +89,9 @@ void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
80 89
81void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr) 90void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
82{ 91{
92 if (unlikely(card->eeh_state))
93 return;
94
83 __disable_intr(&card->ier_mask, intr); 95 __disable_intr(&card->ier_mask, intr);
84 iowrite32(card->ier_mask, card->regmap + IER); 96 iowrite32(card->ier_mask, card->regmap + IER);
85} 97}
@@ -87,7 +99,8 @@ void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
87void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card, 99void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
88 unsigned int intr) 100 unsigned int intr)
89{ 101{
90 if (unlikely(card->halt)) 102 if (unlikely(card->halt) ||
103 unlikely(card->eeh_state))
91 return; 104 return;
92 105
93 __enable_intr(&card->isr_mask, intr); 106 __enable_intr(&card->isr_mask, intr);
@@ -97,6 +110,9 @@ void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
97void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card, 110void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
98 unsigned int intr) 111 unsigned int intr)
99{ 112{
113 if (unlikely(card->eeh_state))
114 return;
115
100 __disable_intr(&card->isr_mask, intr); 116 __disable_intr(&card->isr_mask, intr);
101 __disable_intr(&card->ier_mask, intr); 117 __disable_intr(&card->ier_mask, intr);
102 iowrite32(card->ier_mask, card->regmap + IER); 118 iowrite32(card->ier_mask, card->regmap + IER);
@@ -115,6 +131,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
115 do { 131 do {
116 reread_isr = 0; 132 reread_isr = 0;
117 133
134 if (unlikely(card->eeh_state))
135 break;
136
118 isr = ioread32(card->regmap + ISR); 137 isr = ioread32(card->regmap + ISR);
119 if (isr == 0xffffffff) { 138 if (isr == 0xffffffff) {
120 /* 139 /*
@@ -161,9 +180,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
161} 180}
162 181
163/*----------------- Card Event Handler -------------------*/ 182/*----------------- Card Event Handler -------------------*/
164static char *rsxx_card_state_to_str(unsigned int state) 183static const char * const rsxx_card_state_to_str(unsigned int state)
165{ 184{
166 static char *state_strings[] = { 185 static const char * const state_strings[] = {
167 "Unknown", "Shutdown", "Starting", "Formatting", 186 "Unknown", "Shutdown", "Starting", "Formatting",
168 "Uninitialized", "Good", "Shutting Down", 187 "Uninitialized", "Good", "Shutting Down",
169 "Fault", "Read Only Fault", "dStroying" 188 "Fault", "Read Only Fault", "dStroying"
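
Making the state table const char * const moves both levels into read-only data: neither the pointers nor the strings they reference can be modified. (The extra const on the function's return type is a no-op, but harmless.) In miniature:

/* Pointer array and strings are both read-only; the table lands in .rodata. */
static const char * const state_strings[] = {
        "Unknown", "Good", "Fault",
};
/* state_strings[0] = "Oops";  -- error: assignment of read-only location */
/* state_strings[0][0] = 'X';  -- error: the pointee is const as well     */
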
@@ -304,6 +323,192 @@ static int card_shutdown(struct rsxx_cardinfo *card)
304 return 0; 323 return 0;
305} 324}
306 325
326static int rsxx_eeh_frozen(struct pci_dev *dev)
327{
328 struct rsxx_cardinfo *card = pci_get_drvdata(dev);
329 int i;
330 int st;
331
332 dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n");
333
334 card->eeh_state = 1;
335 rsxx_mask_interrupts(card);
336
337 /*
338 * We need to guarantee that the write for eeh_state and masking
339 * interrupts does not become reordered. This will prevent a possible
340 * race condition with the EEH code.
341 */
342 wmb();
343
344 pci_disable_device(dev);
345
346 st = rsxx_eeh_save_issued_dmas(card);
347 if (st)
348 return st;
349
350 rsxx_eeh_save_issued_creg(card);
351
352 for (i = 0; i < card->n_targets; i++) {
353 if (card->ctrl[i].status.buf)
354 pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
355 card->ctrl[i].status.buf,
356 card->ctrl[i].status.dma_addr);
357 if (card->ctrl[i].cmd.buf)
358 pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
359 card->ctrl[i].cmd.buf,
360 card->ctrl[i].cmd.dma_addr);
361 }
362
363 return 0;
364}
365
366static void rsxx_eeh_failure(struct pci_dev *dev)
367{
368 struct rsxx_cardinfo *card = pci_get_drvdata(dev);
369 int i;
370
371 dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n");
372
373 card->eeh_state = 1;
374
375 for (i = 0; i < card->n_targets; i++)
376 del_timer_sync(&card->ctrl[i].activity_timer);
377
378 rsxx_eeh_cancel_dmas(card);
379}
380
381static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
382{
383 unsigned int status;
384 int iter = 0;
385
386 /* We need to wait for the hardware to reset */
387 while (iter++ < 10) {
388 status = ioread32(card->regmap + PCI_RECONFIG);
389
390 if (status & RSXX_FLUSH_BUSY) {
391 ssleep(1);
392 continue;
393 }
394
395 if (status & RSXX_FLUSH_TIMEOUT)
396 dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
397 return 0;
398 }
399
400 /* Hardware failed resetting itself. */
401 return -1;
402}
403
404static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
405 enum pci_channel_state error)
406{
407 int st;
408
409 if (dev->revision < RSXX_EEH_SUPPORT)
410 return PCI_ERS_RESULT_NONE;
411
412 if (error == pci_channel_io_perm_failure) {
413 rsxx_eeh_failure(dev);
414 return PCI_ERS_RESULT_DISCONNECT;
415 }
416
417 st = rsxx_eeh_frozen(dev);
418 if (st) {
419 dev_err(&dev->dev, "Slot reset setup failed\n");
420 rsxx_eeh_failure(dev);
421 return PCI_ERS_RESULT_DISCONNECT;
422 }
423
424 return PCI_ERS_RESULT_NEED_RESET;
425}
426
427static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
428{
429 struct rsxx_cardinfo *card = pci_get_drvdata(dev);
430 unsigned long flags;
431 int i;
432 int st;
433
434 dev_warn(&dev->dev,
435 "IBM FlashSystem PCI: recovering from slot reset.\n");
436
437 st = pci_enable_device(dev);
438 if (st)
439 goto failed_hw_setup;
440
441 pci_set_master(dev);
442
443 st = rsxx_eeh_fifo_flush_poll(card);
444 if (st)
445 goto failed_hw_setup;
446
447 rsxx_dma_queue_reset(card);
448
449 for (i = 0; i < card->n_targets; i++) {
450 st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
451 if (st)
452 goto failed_hw_buffers_init;
453 }
454
455 if (card->config_valid)
456 rsxx_dma_configure(card);
457
458 /* Clears the ISR register from spurious interrupts */
459 st = ioread32(card->regmap + ISR);
460
461 card->eeh_state = 0;
462
463 st = rsxx_eeh_remap_dmas(card);
464 if (st)
465 goto failed_remap_dmas;
466
467 spin_lock_irqsave(&card->irq_lock, flags);
468 if (card->n_targets & RSXX_MAX_TARGETS)
469 rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
470 else
471 rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
472 spin_unlock_irqrestore(&card->irq_lock, flags);
473
474 rsxx_kick_creg_queue(card);
475
476 for (i = 0; i < card->n_targets; i++) {
477 spin_lock(&card->ctrl[i].queue_lock);
478 if (list_empty(&card->ctrl[i].queue)) {
479 spin_unlock(&card->ctrl[i].queue_lock);
480 continue;
481 }
482 spin_unlock(&card->ctrl[i].queue_lock);
483
484 queue_work(card->ctrl[i].issue_wq,
485 &card->ctrl[i].issue_dma_work);
486 }
487
488 dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n");
489
490 return PCI_ERS_RESULT_RECOVERED;
491
492failed_hw_buffers_init:
493failed_remap_dmas:
494 for (i = 0; i < card->n_targets; i++) {
495 if (card->ctrl[i].status.buf)
496 pci_free_consistent(card->dev,
497 STATUS_BUFFER_SIZE8,
498 card->ctrl[i].status.buf,
499 card->ctrl[i].status.dma_addr);
500 if (card->ctrl[i].cmd.buf)
501 pci_free_consistent(card->dev,
502 COMMAND_BUFFER_SIZE8,
503 card->ctrl[i].cmd.buf,
504 card->ctrl[i].cmd.dma_addr);
505 }
506failed_hw_setup:
507 rsxx_eeh_failure(dev);
508 return PCI_ERS_RESULT_DISCONNECT;
509
510}
511
307/*----------------- Driver Initialization & Setup -------------------*/ 512/*----------------- Driver Initialization & Setup -------------------*/
308/* Returns: 0 if the driver is compatible with the device 513/* Returns: 0 if the driver is compatible with the device
309 -1 if the driver is NOT compatible with the device */ 514 -1 if the driver is NOT compatible with the device */
@@ -383,6 +588,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
383 588
384 spin_lock_init(&card->irq_lock); 589 spin_lock_init(&card->irq_lock);
385 card->halt = 0; 590 card->halt = 0;
591 card->eeh_state = 0;
386 592
387 spin_lock_irq(&card->irq_lock); 593 spin_lock_irq(&card->irq_lock);
388 rsxx_disable_ier_and_isr(card, CR_INTR_ALL); 594 rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
@@ -538,9 +744,6 @@ static void rsxx_pci_remove(struct pci_dev *dev)
538 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT); 744 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
539 spin_unlock_irqrestore(&card->irq_lock, flags); 745 spin_unlock_irqrestore(&card->irq_lock, flags);
540 746
541 /* Prevent work_structs from re-queuing themselves. */
542 card->halt = 1;
543
544 cancel_work_sync(&card->event_work); 747 cancel_work_sync(&card->event_work);
545 748
546 rsxx_destroy_dev(card); 749 rsxx_destroy_dev(card);
@@ -549,6 +752,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
549 spin_lock_irqsave(&card->irq_lock, flags); 752 spin_lock_irqsave(&card->irq_lock, flags);
550 rsxx_disable_ier_and_isr(card, CR_INTR_ALL); 753 rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
551 spin_unlock_irqrestore(&card->irq_lock, flags); 754 spin_unlock_irqrestore(&card->irq_lock, flags);
755
756 /* Prevent work_structs from re-queuing themselves. */
757 card->halt = 1;
758
552 free_irq(dev->irq, card); 759 free_irq(dev->irq, card);
553 760
554 if (!force_legacy) 761 if (!force_legacy)
@@ -592,11 +799,14 @@ static void rsxx_pci_shutdown(struct pci_dev *dev)
592 card_shutdown(card); 799 card_shutdown(card);
593} 800}
594 801
802static const struct pci_error_handlers rsxx_err_handler = {
803 .error_detected = rsxx_error_detected,
804 .slot_reset = rsxx_slot_reset,
805};
806
595static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = { 807static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
596 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)}, 808 {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
597 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)}, 809 {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
598 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
599 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
600 {0,}, 810 {0,},
601}; 811};
602 812
@@ -609,6 +819,7 @@ static struct pci_driver rsxx_pci_driver = {
609 .remove = rsxx_pci_remove, 819 .remove = rsxx_pci_remove,
610 .suspend = rsxx_pci_suspend, 820 .suspend = rsxx_pci_suspend,
611 .shutdown = rsxx_pci_shutdown, 821 .shutdown = rsxx_pci_shutdown,
822 .err_handler = &rsxx_err_handler,
612}; 823};
613 824
614static int __init rsxx_core_init(void) 825static int __init rsxx_core_init(void)
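
The bulk of the core.c diff wires rsxx into the PCI error-recovery framework (EEH on pseries): .error_detected quiesces the card — set eeh_state, mask interrupts, park issued DMAs and creg commands back on their software queues, free the consistent buffers — and .slot_reset rebuilds hardware state once the platform has reset the slot. A minimal sketch of the shape, using the real struct pci_error_handlers hooks but hypothetical foo_* helpers:

static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
                                           enum pci_channel_state state)
{
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;  /* card is gone */

        /* quiesce: mask IRQs, save in-flight work, drop DMA mappings */
        return PCI_ERS_RESULT_NEED_RESET;          /* request a slot reset */
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
        if (pci_enable_device(pdev))
                return PCI_ERS_RESULT_DISCONNECT;
        pci_set_master(pdev);

        /* reprogram rings, remap DMA, re-enable IRQs, kick queued work */
        return PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_error_handlers foo_err_handler = {
        .error_detected = foo_error_detected,
        .slot_reset     = foo_slot_reset,
};
/* hooked into the driver via pci_driver's .err_handler member */
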
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
index 80bbe639fccd..4b5c020a0a65 100644
--- a/drivers/block/rsxx/cregs.c
+++ b/drivers/block/rsxx/cregs.c
@@ -58,7 +58,7 @@ static struct kmem_cache *creg_cmd_pool;
58#error Unknown endianess!!! Aborting... 58#error Unknown endianess!!! Aborting...
59#endif 59#endif
60 60
61static void copy_to_creg_data(struct rsxx_cardinfo *card, 61static int copy_to_creg_data(struct rsxx_cardinfo *card,
62 int cnt8, 62 int cnt8,
63 void *buf, 63 void *buf,
64 unsigned int stream) 64 unsigned int stream)
@@ -66,6 +66,9 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
66 int i = 0; 66 int i = 0;
67 u32 *data = buf; 67 u32 *data = buf;
68 68
69 if (unlikely(card->eeh_state))
70 return -EIO;
71
69 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) { 72 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
70 /* 73 /*
71 * Firmware implementation makes it necessary to byte swap on 74 * Firmware implementation makes it necessary to byte swap on
@@ -76,10 +79,12 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
76 else 79 else
77 iowrite32(data[i], card->regmap + CREG_DATA(i)); 80 iowrite32(data[i], card->regmap + CREG_DATA(i));
78 } 81 }
82
83 return 0;
79} 84}
80 85
81 86
82static void copy_from_creg_data(struct rsxx_cardinfo *card, 87static int copy_from_creg_data(struct rsxx_cardinfo *card,
83 int cnt8, 88 int cnt8,
84 void *buf, 89 void *buf,
85 unsigned int stream) 90 unsigned int stream)
@@ -87,6 +92,9 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
87 int i = 0; 92 int i = 0;
88 u32 *data = buf; 93 u32 *data = buf;
89 94
95 if (unlikely(card->eeh_state))
96 return -EIO;
97
90 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) { 98 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
91 /* 99 /*
92 * Firmware implementation makes it necessary to byte swap on 100 * Firmware implementation makes it necessary to byte swap on
@@ -97,41 +105,31 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
97 else 105 else
98 data[i] = ioread32(card->regmap + CREG_DATA(i)); 106 data[i] = ioread32(card->regmap + CREG_DATA(i));
99 } 107 }
100}
101
102static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
103{
104 struct creg_cmd *cmd;
105 108
106 /* 109 return 0;
107 * Spin lock is needed because this can be called in atomic/interrupt
108 * context.
109 */
110 spin_lock_bh(&card->creg_ctrl.lock);
111 cmd = card->creg_ctrl.active_cmd;
112 card->creg_ctrl.active_cmd = NULL;
113 spin_unlock_bh(&card->creg_ctrl.lock);
114
115 return cmd;
116} 110}
117 111
118static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd) 112static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
119{ 113{
114 int st;
115
116 if (unlikely(card->eeh_state))
117 return;
118
120 iowrite32(cmd->addr, card->regmap + CREG_ADD); 119 iowrite32(cmd->addr, card->regmap + CREG_ADD);
121 iowrite32(cmd->cnt8, card->regmap + CREG_CNT); 120 iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
122 121
123 if (cmd->op == CREG_OP_WRITE) { 122 if (cmd->op == CREG_OP_WRITE) {
124 if (cmd->buf) 123 if (cmd->buf) {
125 copy_to_creg_data(card, cmd->cnt8, 124 st = copy_to_creg_data(card, cmd->cnt8,
126 cmd->buf, cmd->stream); 125 cmd->buf, cmd->stream);
126 if (st)
127 return;
128 }
127 } 129 }
128 130
129 /* 131 if (unlikely(card->eeh_state))
130 * Data copy must complete before initiating the command. This is 132 return;
131 * needed for weakly ordered processors (i.e. PowerPC), so that all
132 * neccessary registers are written before we kick the hardware.
133 */
134 wmb();
135 133
136 /* Setting the valid bit will kick off the command. */ 134 /* Setting the valid bit will kick off the command. */
137 iowrite32(cmd->op, card->regmap + CREG_CMD); 135 iowrite32(cmd->op, card->regmap + CREG_CMD);
@@ -196,11 +194,11 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card,
196 cmd->cb_private = cb_private; 194 cmd->cb_private = cb_private;
197 cmd->status = 0; 195 cmd->status = 0;
198 196
199 spin_lock(&card->creg_ctrl.lock); 197 spin_lock_bh(&card->creg_ctrl.lock);
200 list_add_tail(&cmd->list, &card->creg_ctrl.queue); 198 list_add_tail(&cmd->list, &card->creg_ctrl.queue);
201 card->creg_ctrl.q_depth++; 199 card->creg_ctrl.q_depth++;
202 creg_kick_queue(card); 200 creg_kick_queue(card);
203 spin_unlock(&card->creg_ctrl.lock); 201 spin_unlock_bh(&card->creg_ctrl.lock);
204 202
205 return 0; 203 return 0;
206} 204}
@@ -210,7 +208,11 @@ static void creg_cmd_timed_out(unsigned long data)
210 struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data; 208 struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
211 struct creg_cmd *cmd; 209 struct creg_cmd *cmd;
212 210
213 cmd = pop_active_cmd(card); 211 spin_lock(&card->creg_ctrl.lock);
212 cmd = card->creg_ctrl.active_cmd;
213 card->creg_ctrl.active_cmd = NULL;
214 spin_unlock(&card->creg_ctrl.lock);
215
214 if (cmd == NULL) { 216 if (cmd == NULL) {
215 card->creg_ctrl.creg_stats.creg_timeout++; 217 card->creg_ctrl.creg_stats.creg_timeout++;
216 dev_warn(CARD_TO_DEV(card), 218 dev_warn(CARD_TO_DEV(card),
@@ -247,7 +249,11 @@ static void creg_cmd_done(struct work_struct *work)
247 if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0) 249 if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
248 card->creg_ctrl.creg_stats.failed_cancel_timer++; 250 card->creg_ctrl.creg_stats.failed_cancel_timer++;
249 251
250 cmd = pop_active_cmd(card); 252 spin_lock_bh(&card->creg_ctrl.lock);
253 cmd = card->creg_ctrl.active_cmd;
254 card->creg_ctrl.active_cmd = NULL;
255 spin_unlock_bh(&card->creg_ctrl.lock);
256
251 if (cmd == NULL) { 257 if (cmd == NULL) {
252 dev_err(CARD_TO_DEV(card), 258 dev_err(CARD_TO_DEV(card),
253 "Spurious creg interrupt!\n"); 259 "Spurious creg interrupt!\n");
@@ -287,7 +293,7 @@ static void creg_cmd_done(struct work_struct *work)
287 goto creg_done; 293 goto creg_done;
288 } 294 }
289 295
290 copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream); 296 st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
291 } 297 }
292 298
293creg_done: 299creg_done:
@@ -296,10 +302,10 @@ creg_done:
296 302
297 kmem_cache_free(creg_cmd_pool, cmd); 303 kmem_cache_free(creg_cmd_pool, cmd);
298 304
299 spin_lock(&card->creg_ctrl.lock); 305 spin_lock_bh(&card->creg_ctrl.lock);
300 card->creg_ctrl.active = 0; 306 card->creg_ctrl.active = 0;
301 creg_kick_queue(card); 307 creg_kick_queue(card);
302 spin_unlock(&card->creg_ctrl.lock); 308 spin_unlock_bh(&card->creg_ctrl.lock);
303} 309}
304 310
305static void creg_reset(struct rsxx_cardinfo *card) 311static void creg_reset(struct rsxx_cardinfo *card)
@@ -324,7 +330,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
324 "Resetting creg interface for recovery\n"); 330 "Resetting creg interface for recovery\n");
325 331
326 /* Cancel outstanding commands */ 332 /* Cancel outstanding commands */
327 spin_lock(&card->creg_ctrl.lock); 333 spin_lock_bh(&card->creg_ctrl.lock);
328 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) { 334 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
329 list_del(&cmd->list); 335 list_del(&cmd->list);
330 card->creg_ctrl.q_depth--; 336 card->creg_ctrl.q_depth--;
@@ -345,7 +351,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
345 351
346 card->creg_ctrl.active = 0; 352 card->creg_ctrl.active = 0;
347 } 353 }
348 spin_unlock(&card->creg_ctrl.lock); 354 spin_unlock_bh(&card->creg_ctrl.lock);
349 355
350 card->creg_ctrl.reset = 0; 356 card->creg_ctrl.reset = 0;
351 spin_lock_irqsave(&card->irq_lock, flags); 357 spin_lock_irqsave(&card->irq_lock, flags);
@@ -399,12 +405,12 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card,
399 return st; 405 return st;
400 406
401 /* 407 /*
402 * This timeout is neccessary for unresponsive hardware. The additional 408 * This timeout is necessary for unresponsive hardware. The additional
403 * 20 seconds to used to guarantee that each cregs requests has time to 409 * 20 seconds is used to guarantee that each cregs request has time to
404 * complete. 410 * complete.
405 */ 411 */
406 timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC * 412 timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
407 card->creg_ctrl.q_depth) + 20000); 413 card->creg_ctrl.q_depth + 20000);
408 414
409 /* 415 /*
410 * The creg interface is guaranteed to complete. It has a timeout 416 * The creg interface is guaranteed to complete. It has a timeout
@@ -690,6 +696,32 @@ int rsxx_reg_access(struct rsxx_cardinfo *card,
690 return 0; 696 return 0;
691} 697}
692 698
699void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
700{
701 struct creg_cmd *cmd = NULL;
702
703 cmd = card->creg_ctrl.active_cmd;
704 card->creg_ctrl.active_cmd = NULL;
705
706 if (cmd) {
707 del_timer_sync(&card->creg_ctrl.cmd_timer);
708
709 spin_lock_bh(&card->creg_ctrl.lock);
710 list_add(&cmd->list, &card->creg_ctrl.queue);
711 card->creg_ctrl.q_depth++;
712 card->creg_ctrl.active = 0;
713 spin_unlock_bh(&card->creg_ctrl.lock);
714 }
715}
716
717void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
718{
719 spin_lock_bh(&card->creg_ctrl.lock);
720 if (!list_empty(&card->creg_ctrl.queue))
721 creg_kick_queue(card);
722 spin_unlock_bh(&card->creg_ctrl.lock);
723}
724
693/*------------ Initialization & Setup --------------*/ 725/*------------ Initialization & Setup --------------*/
694int rsxx_creg_setup(struct rsxx_cardinfo *card) 726int rsxx_creg_setup(struct rsxx_cardinfo *card)
695{ 727{
@@ -712,7 +744,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
712 int cnt = 0; 744 int cnt = 0;
713 745
714 /* Cancel outstanding commands */ 746 /* Cancel outstanding commands */
715 spin_lock(&card->creg_ctrl.lock); 747 spin_lock_bh(&card->creg_ctrl.lock);
716 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) { 748 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
717 list_del(&cmd->list); 749 list_del(&cmd->list);
718 if (cmd->cb) 750 if (cmd->cb)
@@ -737,7 +769,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
737 "Canceled active creg command\n"); 769 "Canceled active creg command\n");
738 kmem_cache_free(creg_cmd_pool, cmd); 770 kmem_cache_free(creg_cmd_pool, cmd);
739 } 771 }
740 spin_unlock(&card->creg_ctrl.lock); 772 spin_unlock_bh(&card->creg_ctrl.lock);
741 773
742 cancel_work_sync(&card->creg_ctrl.done_work); 774 cancel_work_sync(&card->creg_ctrl.done_work);
743} 775}
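
Every creg lock conversion in this file follows from one fact: creg_ctrl.lock is also taken from timer context (creg_cmd_timed_out runs in softirq), so process-context users must take it with spin_lock_bh(), or the timer can fire on the same CPU while the lock is held and spin forever. The pattern, sketched with a hypothetical lock:

static DEFINE_SPINLOCK(lock);

static void timer_fn(unsigned long data)        /* softirq context */
{
        spin_lock(&lock);                       /* plain lock is fine here */
        /* ... */
        spin_unlock(&lock);
}

static void process_ctx(void)
{
        /*
         * Must disable bottom halves: otherwise timer_fn can interrupt
         * this CPU while we hold the lock and deadlock against us.
         */
        spin_lock_bh(&lock);
        /* ... */
        spin_unlock_bh(&lock);
}
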
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 63176e67662f..0607513cfb41 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -28,7 +28,7 @@
28struct rsxx_dma { 28struct rsxx_dma {
29 struct list_head list; 29 struct list_head list;
30 u8 cmd; 30 u8 cmd;
31 unsigned int laddr; /* Logical address on the ramsan */ 31 unsigned int laddr; /* Logical address */
32 struct { 32 struct {
33 u32 off; 33 u32 off;
34 u32 cnt; 34 u32 cnt;
@@ -81,9 +81,6 @@ enum rsxx_hw_status {
81 HW_STATUS_FAULT = 0x08, 81 HW_STATUS_FAULT = 0x08,
82}; 82};
83 83
84#define STATUS_BUFFER_SIZE8 4096
85#define COMMAND_BUFFER_SIZE8 4096
86
87static struct kmem_cache *rsxx_dma_pool; 84static struct kmem_cache *rsxx_dma_pool;
88 85
89struct dma_tracker { 86struct dma_tracker {
@@ -122,7 +119,7 @@ static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
122 return tgt; 119 return tgt;
123} 120}
124 121
125static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card) 122void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
126{ 123{
127 /* Reset all DMA Command/Status Queues */ 124 /* Reset all DMA Command/Status Queues */
128 iowrite32(DMA_QUEUE_RESET, card->regmap + RESET); 125 iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
@@ -210,7 +207,8 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
210 u32 q_depth = 0; 207 u32 q_depth = 0;
211 u32 intr_coal; 208 u32 intr_coal;
212 209
213 if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE) 210 if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
211 unlikely(card->eeh_state))
214 return; 212 return;
215 213
216 for (i = 0; i < card->n_targets; i++) 214 for (i = 0; i < card->n_targets; i++)
@@ -223,31 +221,26 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
223} 221}
224 222
225/*----------------- RSXX DMA Handling -------------------*/ 223/*----------------- RSXX DMA Handling -------------------*/
226static void rsxx_complete_dma(struct rsxx_cardinfo *card, 224static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
227 struct rsxx_dma *dma, 225 struct rsxx_dma *dma,
228 unsigned int status) 226 unsigned int status)
229{ 227{
230 if (status & DMA_SW_ERR) 228 if (status & DMA_SW_ERR)
231 printk_ratelimited(KERN_ERR 229 ctrl->stats.dma_sw_err++;
232 "SW Error in DMA(cmd x%02x, laddr x%08x)\n",
233 dma->cmd, dma->laddr);
234 if (status & DMA_HW_FAULT) 230 if (status & DMA_HW_FAULT)
235 printk_ratelimited(KERN_ERR 231 ctrl->stats.dma_hw_fault++;
236 "HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
237 dma->cmd, dma->laddr);
238 if (status & DMA_CANCELLED) 232 if (status & DMA_CANCELLED)
239 printk_ratelimited(KERN_ERR 233 ctrl->stats.dma_cancelled++;
240 "DMA Cancelled(cmd x%02x, laddr x%08x)\n",
241 dma->cmd, dma->laddr);
242 234
243 if (dma->dma_addr) 235 if (dma->dma_addr)
244 pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma), 236 pci_unmap_page(ctrl->card->dev, dma->dma_addr,
237 get_dma_size(dma),
245 dma->cmd == HW_CMD_BLK_WRITE ? 238 dma->cmd == HW_CMD_BLK_WRITE ?
246 PCI_DMA_TODEVICE : 239 PCI_DMA_TODEVICE :
247 PCI_DMA_FROMDEVICE); 240 PCI_DMA_FROMDEVICE);
248 241
249 if (dma->cb) 242 if (dma->cb)
250 dma->cb(card, dma->cb_data, status ? 1 : 0); 243 dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
251 244
252 kmem_cache_free(rsxx_dma_pool, dma); 245 kmem_cache_free(rsxx_dma_pool, dma);
253} 246}
@@ -330,14 +323,15 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
330 if (requeue_cmd) 323 if (requeue_cmd)
331 rsxx_requeue_dma(ctrl, dma); 324 rsxx_requeue_dma(ctrl, dma);
332 else 325 else
333 rsxx_complete_dma(ctrl->card, dma, status); 326 rsxx_complete_dma(ctrl, dma, status);
334} 327}
335 328
336static void dma_engine_stalled(unsigned long data) 329static void dma_engine_stalled(unsigned long data)
337{ 330{
338 struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data; 331 struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
339 332
340 if (atomic_read(&ctrl->stats.hw_q_depth) == 0) 333 if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
334 unlikely(ctrl->card->eeh_state))
341 return; 335 return;
342 336
343 if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) { 337 if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
@@ -369,7 +363,8 @@ static void rsxx_issue_dmas(struct work_struct *work)
369 ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work); 363 ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
370 hw_cmd_buf = ctrl->cmd.buf; 364 hw_cmd_buf = ctrl->cmd.buf;
371 365
372 if (unlikely(ctrl->card->halt)) 366 if (unlikely(ctrl->card->halt) ||
367 unlikely(ctrl->card->eeh_state))
373 return; 368 return;
374 369
375 while (1) { 370 while (1) {
@@ -397,7 +392,7 @@ static void rsxx_issue_dmas(struct work_struct *work)
397 */ 392 */
398 if (unlikely(ctrl->card->dma_fault)) { 393 if (unlikely(ctrl->card->dma_fault)) {
399 push_tracker(ctrl->trackers, tag); 394 push_tracker(ctrl->trackers, tag);
400 rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED); 395 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
401 continue; 396 continue;
402 } 397 }
403 398
@@ -432,19 +427,15 @@ static void rsxx_issue_dmas(struct work_struct *work)
432 427
433 /* Let HW know we've queued commands. */ 428 /* Let HW know we've queued commands. */
434 if (cmds_pending) { 429 if (cmds_pending) {
435 /*
436 * We must guarantee that the CPU writes to 'ctrl->cmd.buf'
437 * (which is in PCI-consistent system-memory) from the loop
438 * above make it into the coherency domain before the
439 * following PIO "trigger" updating the cmd.idx. A WMB is
440 * sufficient. We need not explicitly CPU cache-flush since
441 * the memory is a PCI-consistent (ie; coherent) mapping.
442 */
443 wmb();
444
445 atomic_add(cmds_pending, &ctrl->stats.hw_q_depth); 430 atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
446 mod_timer(&ctrl->activity_timer, 431 mod_timer(&ctrl->activity_timer,
447 jiffies + DMA_ACTIVITY_TIMEOUT); 432 jiffies + DMA_ACTIVITY_TIMEOUT);
433
434 if (unlikely(ctrl->card->eeh_state)) {
435 del_timer_sync(&ctrl->activity_timer);
436 return;
437 }
438
448 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); 439 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
449 } 440 }
450} 441}
@@ -463,7 +454,8 @@ static void rsxx_dma_done(struct work_struct *work)
463 hw_st_buf = ctrl->status.buf; 454 hw_st_buf = ctrl->status.buf;
464 455
465 if (unlikely(ctrl->card->halt) || 456 if (unlikely(ctrl->card->halt) ||
466 unlikely(ctrl->card->dma_fault)) 457 unlikely(ctrl->card->dma_fault) ||
458 unlikely(ctrl->card->eeh_state))
467 return; 459 return;
468 460
469 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count); 461 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
@@ -508,7 +500,7 @@ static void rsxx_dma_done(struct work_struct *work)
508 if (status) 500 if (status)
509 rsxx_handle_dma_error(ctrl, dma, status); 501 rsxx_handle_dma_error(ctrl, dma, status);
510 else 502 else
511 rsxx_complete_dma(ctrl->card, dma, 0); 503 rsxx_complete_dma(ctrl, dma, 0);
512 504
513 push_tracker(ctrl->trackers, tag); 505 push_tracker(ctrl->trackers, tag);
514 506
@@ -727,20 +719,54 @@ bvec_err:
727 719
728 720
729/*----------------- DMA Engine Initialization & Setup -------------------*/ 721/*----------------- DMA Engine Initialization & Setup -------------------*/
722int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
723{
724 ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
725 &ctrl->status.dma_addr);
726 ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
727 &ctrl->cmd.dma_addr);
728 if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
729 return -ENOMEM;
730
731 memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
732 iowrite32(lower_32_bits(ctrl->status.dma_addr),
733 ctrl->regmap + SB_ADD_LO);
734 iowrite32(upper_32_bits(ctrl->status.dma_addr),
735 ctrl->regmap + SB_ADD_HI);
736
737 memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
738 iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
739 iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
740
741 ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
742 if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
743 dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
744 ctrl->status.idx);
745 return -EINVAL;
746 }
747 iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
748 iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
749
750 ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
751 if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
752 dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
753 ctrl->cmd.idx);
754 return -EINVAL;
755 }
756 iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
757 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
758
759 return 0;
760}
761
730static int rsxx_dma_ctrl_init(struct pci_dev *dev, 762static int rsxx_dma_ctrl_init(struct pci_dev *dev,
731 struct rsxx_dma_ctrl *ctrl) 763 struct rsxx_dma_ctrl *ctrl)
732{ 764{
733 int i; 765 int i;
766 int st;
734 767
735 memset(&ctrl->stats, 0, sizeof(ctrl->stats)); 768 memset(&ctrl->stats, 0, sizeof(ctrl->stats));
736 769
737 ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
738 &ctrl->status.dma_addr);
739 ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
740 &ctrl->cmd.dma_addr);
741 if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
742 return -ENOMEM;
743
744 ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8); 770 ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
745 if (!ctrl->trackers) 771 if (!ctrl->trackers)
746 return -ENOMEM; 772 return -ENOMEM;
@@ -770,35 +796,9 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
770 INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas); 796 INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
771 INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done); 797 INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
772 798
773 memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8); 799 st = rsxx_hw_buffers_init(dev, ctrl);
774 iowrite32(lower_32_bits(ctrl->status.dma_addr), 800 if (st)
775 ctrl->regmap + SB_ADD_LO); 801 return st;
776 iowrite32(upper_32_bits(ctrl->status.dma_addr),
777 ctrl->regmap + SB_ADD_HI);
778
779 memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
780 iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
781 iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
782
783 ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
784 if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
785 dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
786 ctrl->status.idx);
787 return -EINVAL;
788 }
789 iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
790 iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
791
792 ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
793 if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
794 dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
795 ctrl->status.idx);
796 return -EINVAL;
797 }
798 iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
799 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
800
801 wmb();
802 802
803 return 0; 803 return 0;
804} 804}
@@ -834,7 +834,7 @@ static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
834 return 0; 834 return 0;
835} 835}
836 836
837static int rsxx_dma_configure(struct rsxx_cardinfo *card) 837int rsxx_dma_configure(struct rsxx_cardinfo *card)
838{ 838{
839 u32 intr_coal; 839 u32 intr_coal;
840 840
@@ -980,6 +980,103 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
980 } 980 }
981} 981}
982 982
983int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
984{
985 int i;
986 int j;
987 int cnt;
988 struct rsxx_dma *dma;
989 struct list_head *issued_dmas;
990
991 issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,
992 GFP_KERNEL);
993 if (!issued_dmas)
994 return -ENOMEM;
995
996 for (i = 0; i < card->n_targets; i++) {
997 INIT_LIST_HEAD(&issued_dmas[i]);
998 cnt = 0;
999 for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
1000 dma = get_tracker_dma(card->ctrl[i].trackers, j);
1001 if (dma == NULL)
1002 continue;
1003
1004 if (dma->cmd == HW_CMD_BLK_WRITE)
1005 card->ctrl[i].stats.writes_issued--;
1006 else if (dma->cmd == HW_CMD_BLK_DISCARD)
1007 card->ctrl[i].stats.discards_issued--;
1008 else
1009 card->ctrl[i].stats.reads_issued--;
1010
1011 list_add_tail(&dma->list, &issued_dmas[i]);
1012 push_tracker(card->ctrl[i].trackers, j);
1013 cnt++;
1014 }
1015
1016 spin_lock(&card->ctrl[i].queue_lock);
1017 list_splice(&issued_dmas[i], &card->ctrl[i].queue);
1018
1019 atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
1020 card->ctrl[i].stats.sw_q_depth += cnt;
1021 card->ctrl[i].e_cnt = 0;
1022
1023 list_for_each_entry(dma, &card->ctrl[i].queue, list) {
1024 if (dma->dma_addr)
1025 pci_unmap_page(card->dev, dma->dma_addr,
1026 get_dma_size(dma),
1027 dma->cmd == HW_CMD_BLK_WRITE ?
1028 PCI_DMA_TODEVICE :
1029 PCI_DMA_FROMDEVICE);
1030 }
1031 spin_unlock(&card->ctrl[i].queue_lock);
1032 }
1033
1034 kfree(issued_dmas);
1035
1036 return 0;
1037}
1038
1039void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
1040{
1041 struct rsxx_dma *dma;
1042 struct rsxx_dma *tmp;
1043 int i;
1044
1045 for (i = 0; i < card->n_targets; i++) {
1046 spin_lock(&card->ctrl[i].queue_lock);
1047 list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
1048 list_del(&dma->list);
1049
1050 rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
1051 }
1052 spin_unlock(&card->ctrl[i].queue_lock);
1053 }
1054}
1055
1056int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
1057{
1058 struct rsxx_dma *dma;
1059 int i;
1060
1061 for (i = 0; i < card->n_targets; i++) {
1062 spin_lock(&card->ctrl[i].queue_lock);
1063 list_for_each_entry(dma, &card->ctrl[i].queue, list) {
1064 dma->dma_addr = pci_map_page(card->dev, dma->page,
1065 dma->pg_off, get_dma_size(dma),
1066 dma->cmd == HW_CMD_BLK_WRITE ?
1067 PCI_DMA_TODEVICE :
1068 PCI_DMA_FROMDEVICE);
1069 if (!dma->dma_addr) {
1070 spin_unlock(&card->ctrl[i].queue_lock);
1071 kmem_cache_free(rsxx_dma_pool, dma);
1072 return -ENOMEM;
1073 }
1074 }
1075 spin_unlock(&card->ctrl[i].queue_lock);
1076 }
1077
1078 return 0;
1079}
983 1080
984int rsxx_dma_init(void) 1081int rsxx_dma_init(void)
985{ 1082{
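
rsxx_eeh_remap_dmas() exists because a slot reset invalidates every outstanding bus address, so queued DMAs must be remapped before reissue. One hedged aside: the sketch below verifies the mapping with pci_dma_mapping_error(), the check the DMA API provides, whereas the patch compares the handle against zero, which happens to work on most platforms but is not guaranteed:

/* Sketch: remap one request's page and verify the handle. */
static int remap_one(struct pci_dev *pdev, struct page *page,
                     unsigned long off, size_t len, int is_write)
{
        dma_addr_t addr = pci_map_page(pdev, page, off, len,
                                       is_write ? PCI_DMA_TODEVICE
                                                : PCI_DMA_FROMDEVICE);

        if (pci_dma_mapping_error(pdev, addr))
                return -ENOMEM;         /* cannot issue this request */

        /* record addr so the DMA can be reissued */
        return 0;
}
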
diff --git a/drivers/block/rsxx/rsxx.h b/drivers/block/rsxx/rsxx.h
index 2e50b65902b7..24ba3642bd89 100644
--- a/drivers/block/rsxx/rsxx.h
+++ b/drivers/block/rsxx/rsxx.h
@@ -27,15 +27,17 @@
27 27
28/*----------------- IOCTL Definitions -------------------*/ 28/*----------------- IOCTL Definitions -------------------*/
29 29
30#define RSXX_MAX_DATA 8
31
30struct rsxx_reg_access { 32struct rsxx_reg_access {
31 __u32 addr; 33 __u32 addr;
32 __u32 cnt; 34 __u32 cnt;
33 __u32 stat; 35 __u32 stat;
34 __u32 stream; 36 __u32 stream;
35 __u32 data[8]; 37 __u32 data[RSXX_MAX_DATA];
36}; 38};
37 39
38#define RSXX_MAX_REG_CNT (8 * (sizeof(__u32))) 40#define RSXX_MAX_REG_CNT (RSXX_MAX_DATA * (sizeof(__u32)))
39 41
40#define RSXX_IOC_MAGIC 'r' 42#define RSXX_IOC_MAGIC 'r'
41 43
diff --git a/drivers/block/rsxx/rsxx_cfg.h b/drivers/block/rsxx/rsxx_cfg.h
index c025fe5fdb70..f384c943846d 100644
--- a/drivers/block/rsxx/rsxx_cfg.h
+++ b/drivers/block/rsxx/rsxx_cfg.h
@@ -58,7 +58,7 @@ struct rsxx_card_cfg {
58}; 58};
59 59
60/* Vendor ID Values */ 60/* Vendor ID Values */
61#define RSXX_VENDOR_ID_TMS_IBM 0 61#define RSXX_VENDOR_ID_IBM 0
62#define RSXX_VENDOR_ID_DSI 1 62#define RSXX_VENDOR_ID_DSI 1
63#define RSXX_VENDOR_COUNT 2 63#define RSXX_VENDOR_COUNT 2
64 64
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index a1ac907d8f4c..382e8bf5c03b 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -45,16 +45,13 @@
45 45
46struct proc_cmd; 46struct proc_cmd;
47 47
48#define PCI_VENDOR_ID_TMS_IBM 0x15B6 48#define PCI_DEVICE_ID_FS70_FLASH 0x04A9
49#define PCI_DEVICE_ID_RS70_FLASH 0x0019 49#define PCI_DEVICE_ID_FS80_FLASH 0x04AA
50#define PCI_DEVICE_ID_RS70D_FLASH 0x001A
51#define PCI_DEVICE_ID_RS80_FLASH 0x001C
52#define PCI_DEVICE_ID_RS81_FLASH 0x001E
53 50
54#define RS70_PCI_REV_SUPPORTED 4 51#define RS70_PCI_REV_SUPPORTED 4
55 52
56#define DRIVER_NAME "rsxx" 53#define DRIVER_NAME "rsxx"
57#define DRIVER_VERSION "3.7" 54#define DRIVER_VERSION "4.0"
58 55
59/* Block size is 4096 */ 56/* Block size is 4096 */
60#define RSXX_HW_BLK_SHIFT 12 57#define RSXX_HW_BLK_SHIFT 12
@@ -67,6 +64,9 @@ struct proc_cmd;
67#define RSXX_MAX_OUTSTANDING_CMDS 255 64#define RSXX_MAX_OUTSTANDING_CMDS 255
68#define RSXX_CS_IDX_MASK 0xff 65#define RSXX_CS_IDX_MASK 0xff
69 66
67#define STATUS_BUFFER_SIZE8 4096
68#define COMMAND_BUFFER_SIZE8 4096
69
70#define RSXX_MAX_TARGETS 8 70#define RSXX_MAX_TARGETS 8
71 71
72struct dma_tracker_list; 72struct dma_tracker_list;
@@ -91,6 +91,9 @@ struct rsxx_dma_stats {
91 u32 discards_failed; 91 u32 discards_failed;
92 u32 done_rescheduled; 92 u32 done_rescheduled;
93 u32 issue_rescheduled; 93 u32 issue_rescheduled;
94 u32 dma_sw_err;
95 u32 dma_hw_fault;
96 u32 dma_cancelled;
94 u32 sw_q_depth; /* Number of DMAs on the SW queue. */ 97 u32 sw_q_depth; /* Number of DMAs on the SW queue. */
95 atomic_t hw_q_depth; /* Number of DMAs queued to HW. */ 98 atomic_t hw_q_depth; /* Number of DMAs queued to HW. */
96}; 99};
@@ -116,6 +119,7 @@ struct rsxx_dma_ctrl {
116struct rsxx_cardinfo { 119struct rsxx_cardinfo {
117 struct pci_dev *dev; 120 struct pci_dev *dev;
118 unsigned int halt; 121 unsigned int halt;
122 unsigned int eeh_state;
119 123
120 void __iomem *regmap; 124 void __iomem *regmap;
121 spinlock_t irq_lock; 125 spinlock_t irq_lock;
@@ -224,6 +228,7 @@ enum rsxx_pci_regmap {
224 PERF_RD512_HI = 0xac, 228 PERF_RD512_HI = 0xac,
225 PERF_WR512_LO = 0xb0, 229 PERF_WR512_LO = 0xb0,
226 PERF_WR512_HI = 0xb4, 230 PERF_WR512_HI = 0xb4,
231 PCI_RECONFIG = 0xb8,
227}; 232};
228 233
229enum rsxx_intr { 234enum rsxx_intr {
@@ -237,6 +242,8 @@ enum rsxx_intr {
237 CR_INTR_DMA5 = 0x00000080, 242 CR_INTR_DMA5 = 0x00000080,
238 CR_INTR_DMA6 = 0x00000100, 243 CR_INTR_DMA6 = 0x00000100,
239 CR_INTR_DMA7 = 0x00000200, 244 CR_INTR_DMA7 = 0x00000200,
245 CR_INTR_ALL_C = 0x0000003f,
246 CR_INTR_ALL_G = 0x000003ff,
240 CR_INTR_DMA_ALL = 0x000003f5, 247 CR_INTR_DMA_ALL = 0x000003f5,
241 CR_INTR_ALL = 0xffffffff, 248 CR_INTR_ALL = 0xffffffff,
242}; 249};
@@ -253,8 +260,14 @@ enum rsxx_pci_reset {
253 DMA_QUEUE_RESET = 0x00000001, 260 DMA_QUEUE_RESET = 0x00000001,
254}; 261};
255 262
263enum rsxx_hw_fifo_flush {
264 RSXX_FLUSH_BUSY = 0x00000002,
265 RSXX_FLUSH_TIMEOUT = 0x00000004,
266};
267
256enum rsxx_pci_revision { 268enum rsxx_pci_revision {
257 RSXX_DISCARD_SUPPORT = 2, 269 RSXX_DISCARD_SUPPORT = 2,
270 RSXX_EEH_SUPPORT = 3,
258}; 271};
259 272
260enum rsxx_creg_cmd { 273enum rsxx_creg_cmd {
@@ -360,11 +373,17 @@ int rsxx_dma_setup(struct rsxx_cardinfo *card);
360void rsxx_dma_destroy(struct rsxx_cardinfo *card); 373void rsxx_dma_destroy(struct rsxx_cardinfo *card);
361int rsxx_dma_init(void); 374int rsxx_dma_init(void);
362void rsxx_dma_cleanup(void); 375void rsxx_dma_cleanup(void);
376void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
377int rsxx_dma_configure(struct rsxx_cardinfo *card);
363int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, 378int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
364 struct bio *bio, 379 struct bio *bio,
365 atomic_t *n_dmas, 380 atomic_t *n_dmas,
366 rsxx_dma_cb cb, 381 rsxx_dma_cb cb,
367 void *cb_data); 382 void *cb_data);
383int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
384int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
385void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card);
386int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
368 387
369/***** cregs.c *****/ 388/***** cregs.c *****/
370int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr, 389int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,
@@ -389,10 +408,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card);
389void rsxx_creg_destroy(struct rsxx_cardinfo *card); 408void rsxx_creg_destroy(struct rsxx_cardinfo *card);
390int rsxx_creg_init(void); 409int rsxx_creg_init(void);
391void rsxx_creg_cleanup(void); 410void rsxx_creg_cleanup(void);
392
393int rsxx_reg_access(struct rsxx_cardinfo *card, 411int rsxx_reg_access(struct rsxx_cardinfo *card,
394 struct rsxx_reg_access __user *ucmd, 412 struct rsxx_reg_access __user *ucmd,
395 int read); 413 int read);
414void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card);
415void rsxx_kick_creg_queue(struct rsxx_cardinfo *card);
396 416
397 417
398 418
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index de1f319f7bd7..dd5b2fed97e9 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -164,7 +164,7 @@ static void make_response(struct xen_blkif *blkif, u64 id,
164 164
165#define foreach_grant_safe(pos, n, rbtree, node) \ 165#define foreach_grant_safe(pos, n, rbtree, node) \
166 for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ 166 for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
167 (n) = rb_next(&(pos)->node); \ 167 (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
168 &(pos)->node != NULL; \ 168 &(pos)->node != NULL; \
169 (pos) = container_of(n, typeof(*(pos)), node), \ 169 (pos) = container_of(n, typeof(*(pos)), node), \
170 (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) 170 (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
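
foreach_grant_safe() has the usual safe-iteration shape — fetch the successor before the body runs, because the body may free the current node — and the fix applies the same NULL guard to the initial rb_next() fetch that the increment step already had, so iterating an empty rbtree no longer calls rb_next() on a pointer derived from NULL. The shape in plain C, on a throwaway singly linked list:

#include <stdlib.h>

struct node { struct node *next; };

static void free_all(struct node *head)
{
        struct node *pos, *n;

        /* grab the successor first; the loop body frees pos */
        for (pos = head, n = pos ? pos->next : NULL;
             pos;
             pos = n, n = pos ? pos->next : NULL)
                free(pos);
}
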
@@ -381,8 +381,8 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
381 381
382static void print_stats(struct xen_blkif *blkif) 382static void print_stats(struct xen_blkif *blkif)
383{ 383{
384 pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d" 384 pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
385 " | ds %4d\n", 385 " | ds %4llu\n",
386 current->comm, blkif->st_oo_req, 386 current->comm, blkif->st_oo_req,
387 blkif->st_rd_req, blkif->st_wr_req, 387 blkif->st_rd_req, blkif->st_wr_req,
388 blkif->st_f_req, blkif->st_ds_req); 388 blkif->st_f_req, blkif->st_ds_req);
@@ -442,7 +442,7 @@ int xen_blkif_schedule(void *arg)
442} 442}
443 443
444struct seg_buf { 444struct seg_buf {
445 unsigned long buf; 445 unsigned int offset;
446 unsigned int nsec; 446 unsigned int nsec;
447}; 447};
448/* 448/*
@@ -621,30 +621,21 @@ static int xen_blkbk_map(struct blkif_request *req,
621 * If this is a new persistent grant 621 * If this is a new persistent grant
622 * save the handler 622 * save the handler
623 */ 623 */
624 persistent_gnts[i]->handle = map[j].handle; 624 persistent_gnts[i]->handle = map[j++].handle;
625 persistent_gnts[i]->dev_bus_addr =
626 map[j++].dev_bus_addr;
627 } 625 }
628 pending_handle(pending_req, i) = 626 pending_handle(pending_req, i) =
629 persistent_gnts[i]->handle; 627 persistent_gnts[i]->handle;
630 628
631 if (ret) 629 if (ret)
632 continue; 630 continue;
633
634 seg[i].buf = persistent_gnts[i]->dev_bus_addr |
635 (req->u.rw.seg[i].first_sect << 9);
636 } else { 631 } else {
637 pending_handle(pending_req, i) = map[j].handle; 632 pending_handle(pending_req, i) = map[j++].handle;
638 bitmap_set(pending_req->unmap_seg, i, 1); 633 bitmap_set(pending_req->unmap_seg, i, 1);
639 634
640 if (ret) { 635 if (ret)
641 j++;
642 continue; 636 continue;
643 }
644
645 seg[i].buf = map[j++].dev_bus_addr |
646 (req->u.rw.seg[i].first_sect << 9);
647 } 637 }
638 seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
648 } 639 }
649 return ret; 640 return ret;
650} 641}
@@ -679,6 +670,16 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
679 return err; 670 return err;
680} 671}
681 672
673static int dispatch_other_io(struct xen_blkif *blkif,
674 struct blkif_request *req,
675 struct pending_req *pending_req)
676{
677 free_req(pending_req);
678 make_response(blkif, req->u.other.id, req->operation,
679 BLKIF_RSP_EOPNOTSUPP);
680 return -EIO;
681}
682
682static void xen_blk_drain_io(struct xen_blkif *blkif) 683static void xen_blk_drain_io(struct xen_blkif *blkif)
683{ 684{
684 atomic_set(&blkif->drain, 1); 685 atomic_set(&blkif->drain, 1);
@@ -800,17 +801,30 @@ __do_block_io_op(struct xen_blkif *blkif)
800 801
801 /* Apply all sanity checks to /private copy/ of request. */ 802 /* Apply all sanity checks to /private copy/ of request. */
802 barrier(); 803 barrier();
803 if (unlikely(req.operation == BLKIF_OP_DISCARD)) { 804
805 switch (req.operation) {
806 case BLKIF_OP_READ:
807 case BLKIF_OP_WRITE:
808 case BLKIF_OP_WRITE_BARRIER:
809 case BLKIF_OP_FLUSH_DISKCACHE:
810 if (dispatch_rw_block_io(blkif, &req, pending_req))
811 goto done;
812 break;
813 case BLKIF_OP_DISCARD:
804 free_req(pending_req); 814 free_req(pending_req);
805 if (dispatch_discard_io(blkif, &req)) 815 if (dispatch_discard_io(blkif, &req))
806 break; 816 goto done;
807 } else if (dispatch_rw_block_io(blkif, &req, pending_req))
808 break; 817 break;
818 default:
819 if (dispatch_other_io(blkif, &req, pending_req))
820 goto done;
821 break;
822 }
809 823
810 /* Yield point for this unbounded loop. */ 824 /* Yield point for this unbounded loop. */
811 cond_resched(); 825 cond_resched();
812 } 826 }
813 827done:
814 return more_to_do; 828 return more_to_do;
815} 829}
816 830
@@ -904,7 +918,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
904 pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", 918 pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
905 operation == READ ? "read" : "write", 919 operation == READ ? "read" : "write",
906 preq.sector_number, 920 preq.sector_number,
907 preq.sector_number + preq.nr_sects, preq.dev); 921 preq.sector_number + preq.nr_sects,
922 blkif->vbd.pdevice);
908 goto fail_response; 923 goto fail_response;
909 } 924 }
910 925
@@ -947,7 +962,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
947 (bio_add_page(bio, 962 (bio_add_page(bio,
948 pages[i], 963 pages[i],
949 seg[i].nsec << 9, 964 seg[i].nsec << 9,
950 seg[i].buf & ~PAGE_MASK) == 0)) { 965 seg[i].offset) == 0)) {
951 966
952 bio = bio_alloc(GFP_KERNEL, nseg-i); 967 bio = bio_alloc(GFP_KERNEL, nseg-i);
953 if (unlikely(bio == NULL)) 968 if (unlikely(bio == NULL))
@@ -977,13 +992,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
977 bio->bi_end_io = end_block_io_op; 992 bio->bi_end_io = end_block_io_op;
978 } 993 }
979 994
980 /*
981 * We set it one so that the last submit_bio does not have to call
982 * atomic_inc.
983 */
984 atomic_set(&pending_req->pendcnt, nbio); 995 atomic_set(&pending_req->pendcnt, nbio);
985
986 /* Get a reference count for the disk queue and start sending I/O */
987 blk_start_plug(&plug); 996 blk_start_plug(&plug);
988 997
989 for (i = 0; i < nbio; i++) 998 for (i = 0; i < nbio; i++)
@@ -1011,6 +1020,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
1011 fail_put_bio: 1020 fail_put_bio:
1012 for (i = 0; i < nbio; i++) 1021 for (i = 0; i < nbio; i++)
1013 bio_put(biolist[i]); 1022 bio_put(biolist[i]);
1023 atomic_set(&pending_req->pendcnt, 1);
1014 __end_block_io_op(pending_req, -EINVAL); 1024 __end_block_io_op(pending_req, -EINVAL);
1015 msleep(1); /* back off a bit */ 1025 msleep(1); /* back off a bit */
1016 return -EIO; 1026 return -EIO;
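
The __do_block_io_op() rework above replaces the if/else chain with a switch that routes the known opcodes and, via the new dispatch_other_io(), answers anything unrecognized with BLKIF_RSP_EOPNOTSUPP instead of looping on it. A rough sketch of that dispatch shape, with stub handlers standing in for the real dispatch functions:

    #include <stdio.h>

    enum op { OP_READ, OP_WRITE, OP_WRITE_BARRIER, OP_FLUSH, OP_DISCARD };

    /* Stubs standing in for dispatch_rw_block_io() and friends. */
    static int handle_rw(int op)            { printf("rw op %d\n", op); return 0; }
    static int handle_discard(void)         { printf("discard\n");     return 0; }
    static int respond_not_supported(void)  { printf("EOPNOTSUPP\n");  return -1; }

    static int dispatch(int op)
    {
            switch (op) {
            case OP_READ:
            case OP_WRITE:
            case OP_WRITE_BARRIER:
            case OP_FLUSH:
                    return handle_rw(op);
            case OP_DISCARD:
                    return handle_discard();
            default:
                    /* Unknown opcode: fail the request rather than spin. */
                    return respond_not_supported();
            }
    }

    int main(void)
    {
            dispatch(OP_READ);
            dispatch(42);   /* frontend sent garbage */
            return 0;
    }
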
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 6072390c7f57..60103e2517ba 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -77,11 +77,18 @@ struct blkif_x86_32_request_discard {
77 uint64_t nr_sectors; 77 uint64_t nr_sectors;
78} __attribute__((__packed__)); 78} __attribute__((__packed__));
79 79
80struct blkif_x86_32_request_other {
81 uint8_t _pad1;
82 blkif_vdev_t _pad2;
83 uint64_t id; /* private guest value, echoed in resp */
84} __attribute__((__packed__));
85
80struct blkif_x86_32_request { 86struct blkif_x86_32_request {
81 uint8_t operation; /* BLKIF_OP_??? */ 87 uint8_t operation; /* BLKIF_OP_??? */
82 union { 88 union {
83 struct blkif_x86_32_request_rw rw; 89 struct blkif_x86_32_request_rw rw;
84 struct blkif_x86_32_request_discard discard; 90 struct blkif_x86_32_request_discard discard;
91 struct blkif_x86_32_request_other other;
85 } u; 92 } u;
86} __attribute__((__packed__)); 93} __attribute__((__packed__));
87 94
@@ -113,11 +120,19 @@ struct blkif_x86_64_request_discard {
113 uint64_t nr_sectors; 120 uint64_t nr_sectors;
114} __attribute__((__packed__)); 121} __attribute__((__packed__));
115 122
123struct blkif_x86_64_request_other {
124 uint8_t _pad1;
125 blkif_vdev_t _pad2;
126 uint32_t _pad3; /* offsetof(blkif_..,u.discard.id)==8 */
127 uint64_t id; /* private guest value, echoed in resp */
128} __attribute__((__packed__));
129
116struct blkif_x86_64_request { 130struct blkif_x86_64_request {
117 uint8_t operation; /* BLKIF_OP_??? */ 131 uint8_t operation; /* BLKIF_OP_??? */
118 union { 132 union {
119 struct blkif_x86_64_request_rw rw; 133 struct blkif_x86_64_request_rw rw;
120 struct blkif_x86_64_request_discard discard; 134 struct blkif_x86_64_request_discard discard;
135 struct blkif_x86_64_request_other other;
121 } u; 136 } u;
122} __attribute__((__packed__)); 137} __attribute__((__packed__));
123 138
@@ -172,7 +187,6 @@ struct persistent_gnt {
172 struct page *page; 187 struct page *page;
173 grant_ref_t gnt; 188 grant_ref_t gnt;
174 grant_handle_t handle; 189 grant_handle_t handle;
175 uint64_t dev_bus_addr;
176 struct rb_node node; 190 struct rb_node node;
177}; 191};
178 192
@@ -208,13 +222,13 @@ struct xen_blkif {
208 222
209 /* statistics */ 223 /* statistics */
210 unsigned long st_print; 224 unsigned long st_print;
211 int st_rd_req; 225 unsigned long long st_rd_req;
212 int st_wr_req; 226 unsigned long long st_wr_req;
213 int st_oo_req; 227 unsigned long long st_oo_req;
214 int st_f_req; 228 unsigned long long st_f_req;
215 int st_ds_req; 229 unsigned long long st_ds_req;
216 int st_rd_sect; 230 unsigned long long st_rd_sect;
217 int st_wr_sect; 231 unsigned long long st_wr_sect;
218 232
219 wait_queue_head_t waiting_to_free; 233 wait_queue_head_t waiting_to_free;
220}; 234};
@@ -278,6 +292,11 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
278 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; 292 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
279 break; 293 break;
280 default: 294 default:
295 /*
296 * Don't know how to translate this op. Only get the
297 * ID so failure can be reported to the frontend.
298 */
299 dst->u.other.id = src->u.other.id;
281 break; 300 break;
282 } 301 }
283} 302}
@@ -309,6 +328,11 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
309 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; 328 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
310 break; 329 break;
311 default: 330 default:
331 /*
332 * Don't know how to translate this op. Only get the
333 * ID so failure can be reported to the frontend.
334 */
335 dst->u.other.id = src->u.other.id;
312 break; 336 break;
313 } 337 }
314} 338}
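
The *_request_other variants added above only need to keep the id field at the same offset as in the rw and discard variants, so the backend can echo a failure id back for an opcode it cannot translate. A small layout check, under the assumption that blkif_vdev_t is a 16-bit handle as in the Xen interface headers:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint16_t blkif_vdev_t;            /* assumption: 16-bit handle */

    struct blkif_x86_64_request_other {
            uint8_t _pad1;
            blkif_vdev_t _pad2;
            uint32_t _pad3;                   /* pushes id out to offset 8 */
            uint64_t id;
    } __attribute__((__packed__));

    struct blkif_x86_64_request {
            uint8_t operation;
            union {
                    struct blkif_x86_64_request_other other;
            } u;
    } __attribute__((__packed__));

    int main(void)
    {
            /* Mirrors the "offsetof(blkif_..,u.discard.id)==8" note above. */
            static_assert(offsetof(struct blkif_x86_64_request, u.other.id) == 8,
                          "id must alias the id field of the other variants");
            return 0;
    }
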
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 5e237f630c47..8bfd1bcf95ec 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -230,13 +230,13 @@ int __init xen_blkif_interface_init(void)
230 } \ 230 } \
231 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) 231 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
232 232
233VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req); 233VBD_SHOW(oo_req, "%llu\n", be->blkif->st_oo_req);
234VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req); 234VBD_SHOW(rd_req, "%llu\n", be->blkif->st_rd_req);
235VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req); 235VBD_SHOW(wr_req, "%llu\n", be->blkif->st_wr_req);
236VBD_SHOW(f_req, "%d\n", be->blkif->st_f_req); 236VBD_SHOW(f_req, "%llu\n", be->blkif->st_f_req);
237VBD_SHOW(ds_req, "%d\n", be->blkif->st_ds_req); 237VBD_SHOW(ds_req, "%llu\n", be->blkif->st_ds_req);
238VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect); 238VBD_SHOW(rd_sect, "%llu\n", be->blkif->st_rd_sect);
239VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect); 239VBD_SHOW(wr_sect, "%llu\n", be->blkif->st_wr_sect);
240 240
241static struct attribute *xen_vbdstat_attrs[] = { 241static struct attribute *xen_vbdstat_attrs[] = {
242 &dev_attr_oo_req.attr, 242 &dev_attr_oo_req.attr,
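
Together with the unsigned long long st_* fields in common.h, these %llu format updates keep the statistics monotonic instead of wrapping (and, for the old signed int fields, overflowing into undefined behavior) after roughly two billion requests. A tiny demonstration of the difference, using unsigned arithmetic so the wrap itself is well defined:

    #include <stdio.h>

    int main(void)
    {
            unsigned int narrow = 0xffffffffu;        /* 32-bit counter at its max */
            unsigned long long wide = 0xffffffffULL;

            narrow += 1;    /* wraps to 0 */
            wide += 1;      /* keeps counting */

            printf("32-bit counter: %u\n", narrow);   /* 0 */
            printf("64-bit counter: %llu\n", wide);   /* 4294967296 */
            return 0;
    }
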
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c3dae2e0f290..a894f88762d8 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -44,7 +44,7 @@
44#include <linux/mutex.h> 44#include <linux/mutex.h>
45#include <linux/scatterlist.h> 45#include <linux/scatterlist.h>
46#include <linux/bitmap.h> 46#include <linux/bitmap.h>
47#include <linux/llist.h> 47#include <linux/list.h>
48 48
49#include <xen/xen.h> 49#include <xen/xen.h>
50#include <xen/xenbus.h> 50#include <xen/xenbus.h>
@@ -68,13 +68,12 @@ enum blkif_state {
68struct grant { 68struct grant {
69 grant_ref_t gref; 69 grant_ref_t gref;
70 unsigned long pfn; 70 unsigned long pfn;
71 struct llist_node node; 71 struct list_head node;
72}; 72};
73 73
74struct blk_shadow { 74struct blk_shadow {
75 struct blkif_request req; 75 struct blkif_request req;
76 struct request *request; 76 struct request *request;
77 unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
78 struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 77 struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
79}; 78};
80 79
@@ -105,7 +104,7 @@ struct blkfront_info
105 struct work_struct work; 104 struct work_struct work;
106 struct gnttab_free_callback callback; 105 struct gnttab_free_callback callback;
107 struct blk_shadow shadow[BLK_RING_SIZE]; 106 struct blk_shadow shadow[BLK_RING_SIZE];
108 struct llist_head persistent_gnts; 107 struct list_head persistent_gnts;
109 unsigned int persistent_gnts_c; 108 unsigned int persistent_gnts_c;
110 unsigned long shadow_free; 109 unsigned long shadow_free;
111 unsigned int feature_flush; 110 unsigned int feature_flush;
@@ -165,6 +164,69 @@ static int add_id_to_freelist(struct blkfront_info *info,
165 return 0; 164 return 0;
166} 165}
167 166
167static int fill_grant_buffer(struct blkfront_info *info, int num)
168{
169 struct page *granted_page;
170 struct grant *gnt_list_entry, *n;
171 int i = 0;
172
173 while(i < num) {
174 gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
175 if (!gnt_list_entry)
176 goto out_of_memory;
177
178 granted_page = alloc_page(GFP_NOIO);
179 if (!granted_page) {
180 kfree(gnt_list_entry);
181 goto out_of_memory;
182 }
183
184 gnt_list_entry->pfn = page_to_pfn(granted_page);
185 gnt_list_entry->gref = GRANT_INVALID_REF;
186 list_add(&gnt_list_entry->node, &info->persistent_gnts);
187 i++;
188 }
189
190 return 0;
191
192out_of_memory:
193 list_for_each_entry_safe(gnt_list_entry, n,
194 &info->persistent_gnts, node) {
195 list_del(&gnt_list_entry->node);
196 __free_page(pfn_to_page(gnt_list_entry->pfn));
197 kfree(gnt_list_entry);
198 i--;
199 }
200 BUG_ON(i != 0);
201 return -ENOMEM;
202}
203
204static struct grant *get_grant(grant_ref_t *gref_head,
205 struct blkfront_info *info)
206{
207 struct grant *gnt_list_entry;
208 unsigned long buffer_mfn;
209
210 BUG_ON(list_empty(&info->persistent_gnts));
211 gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant,
212 node);
213 list_del(&gnt_list_entry->node);
214
215 if (gnt_list_entry->gref != GRANT_INVALID_REF) {
216 info->persistent_gnts_c--;
217 return gnt_list_entry;
218 }
219
220 /* Assign a gref to this page */
221 gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
222 BUG_ON(gnt_list_entry->gref == -ENOSPC);
223 buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
224 gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
225 info->xbdev->otherend_id,
226 buffer_mfn, 0);
227 return gnt_list_entry;
228}
229
168static const char *op_name(int op) 230static const char *op_name(int op)
169{ 231{
170 static const char *const names[] = { 232 static const char *const names[] = {
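
fill_grant_buffer() and get_grant() above move all grant allocations to ring setup time, so the request path only ever pops a pre-allocated entry and can no longer fail with -ENOMEM mid-request (the old code allocated with GFP_ATOMIC inside blkif_queue_request). A minimal userspace sketch of that pre-allocated pool pattern, with illustrative names and plain pointers instead of list_head:

    #include <stdlib.h>

    #define INVALID_REF (-1)

    struct grant_buf {
            int gref;                 /* INVALID_REF until first handed out */
            void *page;
            struct grant_buf *next;
    };

    static struct grant_buf *pool;

    /* Allocate every entry up front; fail only at setup time. */
    static int fill_pool(int num)
    {
            while (num--) {
                    struct grant_buf *g = calloc(1, sizeof(*g));

                    if (!g || !(g->page = malloc(4096))) {
                            free(g);
                            return -1;   /* caller tears the pool down */
                    }
                    g->gref = INVALID_REF;
                    g->next = pool;
                    pool = g;
            }
            return 0;
    }

    /* Request path: pop an entry, never allocate. */
    static struct grant_buf *get_from_pool(void)
    {
            struct grant_buf *g = pool;

            pool = g->next;   /* pool is sized for the ring, never empty */
            return g;
    }

    int main(void)
    {
            if (fill_pool(8))
                    return 1;
            struct grant_buf *g = get_from_pool();
            (void)g;
            return 0;
    }
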
@@ -293,7 +355,6 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
293static int blkif_queue_request(struct request *req) 355static int blkif_queue_request(struct request *req)
294{ 356{
295 struct blkfront_info *info = req->rq_disk->private_data; 357 struct blkfront_info *info = req->rq_disk->private_data;
296 unsigned long buffer_mfn;
297 struct blkif_request *ring_req; 358 struct blkif_request *ring_req;
298 unsigned long id; 359 unsigned long id;
299 unsigned int fsect, lsect; 360 unsigned int fsect, lsect;
@@ -306,7 +367,6 @@ static int blkif_queue_request(struct request *req)
306 */ 367 */
307 bool new_persistent_gnts; 368 bool new_persistent_gnts;
308 grant_ref_t gref_head; 369 grant_ref_t gref_head;
309 struct page *granted_page;
310 struct grant *gnt_list_entry = NULL; 370 struct grant *gnt_list_entry = NULL;
311 struct scatterlist *sg; 371 struct scatterlist *sg;
312 372
@@ -370,41 +430,8 @@ static int blkif_queue_request(struct request *req)
370 fsect = sg->offset >> 9; 430 fsect = sg->offset >> 9;
371 lsect = fsect + (sg->length >> 9) - 1; 431 lsect = fsect + (sg->length >> 9) - 1;
372 432
373 if (info->persistent_gnts_c) { 433 gnt_list_entry = get_grant(&gref_head, info);
374 BUG_ON(llist_empty(&info->persistent_gnts)); 434 ref = gnt_list_entry->gref;
375 gnt_list_entry = llist_entry(
376 llist_del_first(&info->persistent_gnts),
377 struct grant, node);
378
379 ref = gnt_list_entry->gref;
380 buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
381 info->persistent_gnts_c--;
382 } else {
383 ref = gnttab_claim_grant_reference(&gref_head);
384 BUG_ON(ref == -ENOSPC);
385
386 gnt_list_entry =
387 kmalloc(sizeof(struct grant),
388 GFP_ATOMIC);
389 if (!gnt_list_entry)
390 return -ENOMEM;
391
392 granted_page = alloc_page(GFP_ATOMIC);
393 if (!granted_page) {
394 kfree(gnt_list_entry);
395 return -ENOMEM;
396 }
397
398 gnt_list_entry->pfn =
399 page_to_pfn(granted_page);
400 gnt_list_entry->gref = ref;
401
402 buffer_mfn = pfn_to_mfn(page_to_pfn(
403 granted_page));
404 gnttab_grant_foreign_access_ref(ref,
405 info->xbdev->otherend_id,
406 buffer_mfn, 0);
407 }
408 435
409 info->shadow[id].grants_used[i] = gnt_list_entry; 436 info->shadow[id].grants_used[i] = gnt_list_entry;
410 437
@@ -435,7 +462,6 @@ static int blkif_queue_request(struct request *req)
435 kunmap_atomic(shared_data); 462 kunmap_atomic(shared_data);
436 } 463 }
437 464
438 info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
439 ring_req->u.rw.seg[i] = 465 ring_req->u.rw.seg[i] =
440 (struct blkif_request_segment) { 466 (struct blkif_request_segment) {
441 .gref = ref, 467 .gref = ref,
@@ -790,9 +816,8 @@ static void blkif_restart_queue(struct work_struct *work)
790 816
791static void blkif_free(struct blkfront_info *info, int suspend) 817static void blkif_free(struct blkfront_info *info, int suspend)
792{ 818{
793 struct llist_node *all_gnts; 819 struct grant *persistent_gnt;
794 struct grant *persistent_gnt, *tmp; 820 struct grant *n;
795 struct llist_node *n;
796 821
797 /* Prevent new requests being issued until we fix things up. */ 822 /* Prevent new requests being issued until we fix things up. */
798 spin_lock_irq(&info->io_lock); 823 spin_lock_irq(&info->io_lock);
@@ -803,22 +828,20 @@ static void blkif_free(struct blkfront_info *info, int suspend)
803 blk_stop_queue(info->rq); 828 blk_stop_queue(info->rq);
804 829
805 /* Remove all persistent grants */ 830 /* Remove all persistent grants */
806 if (info->persistent_gnts_c) { 831 if (!list_empty(&info->persistent_gnts)) {
807 all_gnts = llist_del_all(&info->persistent_gnts); 832 list_for_each_entry_safe(persistent_gnt, n,
808 persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node); 833 &info->persistent_gnts, node) {
809 while (persistent_gnt) { 834 list_del(&persistent_gnt->node);
810 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); 835 if (persistent_gnt->gref != GRANT_INVALID_REF) {
836 gnttab_end_foreign_access(persistent_gnt->gref,
837 0, 0UL);
838 info->persistent_gnts_c--;
839 }
811 __free_page(pfn_to_page(persistent_gnt->pfn)); 840 __free_page(pfn_to_page(persistent_gnt->pfn));
812 tmp = persistent_gnt; 841 kfree(persistent_gnt);
813 n = persistent_gnt->node.next;
814 if (n)
815 persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node);
816 else
817 persistent_gnt = NULL;
818 kfree(tmp);
819 } 842 }
820 info->persistent_gnts_c = 0;
821 } 843 }
844 BUG_ON(info->persistent_gnts_c != 0);
822 845
823 /* No more gnttab callback work. */ 846 /* No more gnttab callback work. */
824 gnttab_cancel_free_callback(&info->callback); 847 gnttab_cancel_free_callback(&info->callback);
@@ -875,7 +898,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
875 } 898 }
876 /* Add the persistent grant into the list of free grants */ 899 /* Add the persistent grant into the list of free grants */
877 for (i = 0; i < s->req.u.rw.nr_segments; i++) { 900 for (i = 0; i < s->req.u.rw.nr_segments; i++) {
878 llist_add(&s->grants_used[i]->node, &info->persistent_gnts); 901 list_add(&s->grants_used[i]->node, &info->persistent_gnts);
879 info->persistent_gnts_c++; 902 info->persistent_gnts_c++;
880 } 903 }
881} 904}
@@ -1013,6 +1036,12 @@ static int setup_blkring(struct xenbus_device *dev,
1013 1036
1014 sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); 1037 sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
1015 1038
1039 /* Allocate memory for grants */
1040 err = fill_grant_buffer(info, BLK_RING_SIZE *
1041 BLKIF_MAX_SEGMENTS_PER_REQUEST);
1042 if (err)
1043 goto fail;
1044
1016 err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); 1045 err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
1017 if (err < 0) { 1046 if (err < 0) {
1018 free_page((unsigned long)sring); 1047 free_page((unsigned long)sring);
@@ -1171,7 +1200,7 @@ static int blkfront_probe(struct xenbus_device *dev,
1171 spin_lock_init(&info->io_lock); 1200 spin_lock_init(&info->io_lock);
1172 info->xbdev = dev; 1201 info->xbdev = dev;
1173 info->vdevice = vdevice; 1202 info->vdevice = vdevice;
1174 init_llist_head(&info->persistent_gnts); 1203 INIT_LIST_HEAD(&info->persistent_gnts);
1175 info->persistent_gnts_c = 0; 1204 info->persistent_gnts_c = 0;
1176 info->connected = BLKIF_STATE_DISCONNECTED; 1205 info->connected = BLKIF_STATE_DISCONNECTED;
1177 INIT_WORK(&info->work, blkif_restart_queue); 1206 INIT_WORK(&info->work, blkif_restart_queue);
@@ -1203,11 +1232,10 @@ static int blkif_recover(struct blkfront_info *info)
1203 int j; 1232 int j;
1204 1233
1205 /* Stage 1: Make a safe copy of the shadow state. */ 1234 /* Stage 1: Make a safe copy of the shadow state. */
1206 copy = kmalloc(sizeof(info->shadow), 1235 copy = kmemdup(info->shadow, sizeof(info->shadow),
1207 GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); 1236 GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
1208 if (!copy) 1237 if (!copy)
1209 return -ENOMEM; 1238 return -ENOMEM;
1210 memcpy(copy, info->shadow, sizeof(info->shadow));
1211 1239
1212 /* Stage 2: Set up free list. */ 1240 /* Stage 2: Set up free list. */
1213 memset(&info->shadow, 0, sizeof(info->shadow)); 1241 memset(&info->shadow, 0, sizeof(info->shadow));
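
kmemdup() is the kernel's allocate-and-copy helper, so the hunk above is a pure simplification of the open-coded kmalloc()+memcpy() pair. A userspace stand-in showing the same shape:

    #include <stdlib.h>
    #include <string.h>

    /* Userspace stand-in for kmemdup(): allocate, then copy. */
    static void *memdup(const void *src, size_t len)
    {
            void *p = malloc(len);

            if (p)
                    memcpy(p, src, len);
            return p;
    }

    int main(void)
    {
            int shadow[4] = { 1, 2, 3, 4 };
            int *copy = memdup(shadow, sizeof(shadow));

            if (!copy)
                    return 1;       /* mirrors the -ENOMEM path */
            memset(shadow, 0, sizeof(shadow));   /* original may now be reset */
            free(copy);
            return 0;
    }
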
@@ -1236,7 +1264,7 @@ static int blkif_recover(struct blkfront_info *info)
1236 gnttab_grant_foreign_access_ref( 1264 gnttab_grant_foreign_access_ref(
1237 req->u.rw.seg[j].gref, 1265 req->u.rw.seg[j].gref,
1238 info->xbdev->otherend_id, 1266 info->xbdev->otherend_id,
1239 pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]), 1267 pfn_to_mfn(copy[i].grants_used[j]->pfn),
1240 0); 1268 0);
1241 } 1269 }
1242 info->shadow[req->u.rw.id].req = *req; 1270 info->shadow[req->u.rw.id].req = *req;
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a8a41e07a221..6aab00ef4379 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -73,9 +73,13 @@ static struct usb_device_id ath3k_table[] = {
73 { USB_DEVICE(0x03F0, 0x311D) }, 73 { USB_DEVICE(0x03F0, 0x311D) },
74 74
75 /* Atheros AR3012 with sflash firmware*/ 75 /* Atheros AR3012 with sflash firmware*/
76 { USB_DEVICE(0x0CF3, 0x0036) },
76 { USB_DEVICE(0x0CF3, 0x3004) }, 77 { USB_DEVICE(0x0CF3, 0x3004) },
78 { USB_DEVICE(0x0CF3, 0x3008) },
77 { USB_DEVICE(0x0CF3, 0x311D) }, 79 { USB_DEVICE(0x0CF3, 0x311D) },
80 { USB_DEVICE(0x0CF3, 0x817a) },
78 { USB_DEVICE(0x13d3, 0x3375) }, 81 { USB_DEVICE(0x13d3, 0x3375) },
82 { USB_DEVICE(0x04CA, 0x3004) },
79 { USB_DEVICE(0x04CA, 0x3005) }, 83 { USB_DEVICE(0x04CA, 0x3005) },
80 { USB_DEVICE(0x04CA, 0x3006) }, 84 { USB_DEVICE(0x04CA, 0x3006) },
81 { USB_DEVICE(0x04CA, 0x3008) }, 85 { USB_DEVICE(0x04CA, 0x3008) },
@@ -105,9 +109,13 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
105static struct usb_device_id ath3k_blist_tbl[] = { 109static struct usb_device_id ath3k_blist_tbl[] = {
106 110
107 /* Atheros AR3012 with sflash firmware*/ 111 /* Atheros AR3012 with sflash firmware*/
112 { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
108 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, 113 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
114 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
109 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, 115 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
116 { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
110 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 117 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
118 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
111 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 119 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
112 { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, 120 { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
113 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 121 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 7e351e345476..2cc5f774a29c 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -131,9 +131,13 @@ static struct usb_device_id blacklist_table[] = {
131 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, 131 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
132 132
133 /* Atheros 3012 with sflash firmware */ 133 /* Atheros 3012 with sflash firmware */
134 { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
134 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, 135 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
136 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
135 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, 137 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
138 { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
136 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 139 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
140 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
137 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 141 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
138 { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, 142 { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
139 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 143 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index b5538bba7a10..09c63315e579 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -157,7 +157,7 @@ static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
157 divisor = parent_rate / rate; 157 divisor = parent_rate / rate;
158 158
159 /* If prate / rate would be decimal, incr the divisor */ 159 /* If prate / rate would be decimal, incr the divisor */
160 if (rate * divisor < *prate) 160 if (rate * divisor < parent_rate)
161 divisor++; 161 divisor++;
162 162
163 if (divisor == cdev->div_mask + 1) 163 if (divisor == cdev->div_mask + 1)
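
The fix above compares against parent_rate rather than the stale *prate, and the divide-then-bump-on-remainder pattern is exactly a ceiling division, i.e. the kernel's DIV_ROUND_UP(parent_rate, rate). A quick exhaustive check of that equivalence for small values:

    #include <assert.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            for (unsigned long parent = 1; parent < 2000; parent++) {
                    for (unsigned long rate = 1; rate <= parent; rate++) {
                            unsigned long div = parent / rate;

                            if (rate * div < parent)   /* remainder left over */
                                    div++;
                            assert(div == DIV_ROUND_UP(parent, rate));
                    }
            }
            return 0;
    }
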
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 937bc286591f..57a8774f0b4e 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -730,7 +730,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
730 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { 730 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
731 cpumask_copy(policy->cpus, perf->shared_cpu_map); 731 cpumask_copy(policy->cpus, perf->shared_cpu_map);
732 } 732 }
733 cpumask_copy(policy->related_cpus, perf->shared_cpu_map);
734 733
735#ifdef CONFIG_SMP 734#ifdef CONFIG_SMP
736 dmi_check_system(sw_any_bug_dmi_table); 735 dmi_check_system(sw_any_bug_dmi_table);
@@ -742,7 +741,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
742 if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { 741 if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
743 cpumask_clear(policy->cpus); 742 cpumask_clear(policy->cpus);
744 cpumask_set_cpu(cpu, policy->cpus); 743 cpumask_set_cpu(cpu, policy->cpus);
745 cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
746 policy->shared_type = CPUFREQ_SHARED_TYPE_HW; 744 policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
747 pr_info_once(PFX "overriding BIOS provided _PSD data\n"); 745 pr_info_once(PFX "overriding BIOS provided _PSD data\n");
748 } 746 }
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 2fd779eb1ed1..bfd6273fd873 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -180,15 +180,19 @@ static void cpufreq_stats_free_sysfs(unsigned int cpu)
180{ 180{
181 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 181 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
182 182
183 if (!cpufreq_frequency_get_table(cpu)) 183 if (!policy)
184 return; 184 return;
185 185
186 if (policy && !policy_is_shared(policy)) { 186 if (!cpufreq_frequency_get_table(cpu))
187 goto put_ref;
188
189 if (!policy_is_shared(policy)) {
187 pr_debug("%s: Free sysfs stat\n", __func__); 190 pr_debug("%s: Free sysfs stat\n", __func__);
188 sysfs_remove_group(&policy->kobj, &stats_attr_group); 191 sysfs_remove_group(&policy->kobj, &stats_attr_group);
189 } 192 }
190 if (policy) 193
191 cpufreq_cpu_put(policy); 194put_ref:
195 cpufreq_cpu_put(policy);
192} 196}
193 197
194static int cpufreq_stats_create_table(struct cpufreq_policy *policy, 198static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
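
The reordering above takes the policy reference first and funnels every early exit through the put_ref label, so cpufreq_cpu_get() and cpufreq_cpu_put() stay balanced on all paths. A sketch of that acquire-early, release-everywhere shape with a toy refcount (hypothetical names, not the cpufreq API):

    #include <stdio.h>

    struct policy { int refs; int shared; };

    static struct policy *policy_get(struct policy *p)
    {
            if (p)
                    p->refs++;
            return p;
    }

    static void policy_put(struct policy *p) { p->refs--; }
    static int have_freq_table(void)         { return 0; }  /* pretend lookup fails */

    static void free_sysfs_stat(struct policy *candidate)
    {
            struct policy *policy = policy_get(candidate);

            if (!policy)
                    return;           /* nothing acquired, nothing to drop */

            if (!have_freq_table())
                    goto put_ref;     /* early exit still drops the ref */

            if (!policy->shared)
                    printf("remove sysfs group\n");

    put_ref:
            policy_put(policy);
    }

    int main(void)
    {
            struct policy pol = { 0, 0 };

            free_sysfs_stat(&pol);
            printf("refs after: %d\n", pol.refs);   /* balanced: 0 */
            return 0;
    }
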
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f6dd1e761129..ad72922919ed 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -358,14 +358,14 @@ static void intel_pstate_sysfs_expose_params(void)
358static int intel_pstate_min_pstate(void) 358static int intel_pstate_min_pstate(void)
359{ 359{
360 u64 value; 360 u64 value;
361 rdmsrl(0xCE, value); 361 rdmsrl(MSR_PLATFORM_INFO, value);
362 return (value >> 40) & 0xFF; 362 return (value >> 40) & 0xFF;
363} 363}
364 364
365static int intel_pstate_max_pstate(void) 365static int intel_pstate_max_pstate(void)
366{ 366{
367 u64 value; 367 u64 value;
368 rdmsrl(0xCE, value); 368 rdmsrl(MSR_PLATFORM_INFO, value);
369 return (value >> 8) & 0xFF; 369 return (value >> 8) & 0xFF;
370} 370}
371 371
@@ -373,7 +373,7 @@ static int intel_pstate_turbo_pstate(void)
373{ 373{
374 u64 value; 374 u64 value;
375 int nont, ret; 375 int nont, ret;
376 rdmsrl(0x1AD, value); 376 rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
377 nont = intel_pstate_max_pstate(); 377 nont = intel_pstate_max_pstate();
378 ret = ((value) & 255); 378 ret = ((value) & 255);
379 if (ret <= nont) 379 if (ret <= nont)
@@ -454,7 +454,7 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
454 sample->idletime_us * 100, 454 sample->idletime_us * 100,
455 sample->duration_us); 455 sample->duration_us);
456 core_pct = div64_u64(sample->aperf * 100, sample->mperf); 456 core_pct = div64_u64(sample->aperf * 100, sample->mperf);
457 sample->freq = cpu->pstate.turbo_pstate * core_pct * 1000; 457 sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
458 458
459 sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct), 459 sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
460 100); 460 100);
@@ -752,6 +752,29 @@ static struct cpufreq_driver intel_pstate_driver = {
752 752
753static int __initdata no_load; 753static int __initdata no_load;
754 754
755static int intel_pstate_msrs_not_valid(void)
756{
757 /* Check that all the msr's we are using are valid. */
758 u64 aperf, mperf, tmp;
759
760 rdmsrl(MSR_IA32_APERF, aperf);
761 rdmsrl(MSR_IA32_MPERF, mperf);
762
763 if (!intel_pstate_min_pstate() ||
764 !intel_pstate_max_pstate() ||
765 !intel_pstate_turbo_pstate())
766 return -ENODEV;
767
768 rdmsrl(MSR_IA32_APERF, tmp);
769 if (!(tmp - aperf))
770 return -ENODEV;
771
772 rdmsrl(MSR_IA32_MPERF, tmp);
773 if (!(tmp - mperf))
774 return -ENODEV;
775
776 return 0;
777}
755static int __init intel_pstate_init(void) 778static int __init intel_pstate_init(void)
756{ 779{
757 int cpu, rc = 0; 780 int cpu, rc = 0;
@@ -764,6 +787,9 @@ static int __init intel_pstate_init(void)
764 if (!id) 787 if (!id)
765 return -ENODEV; 788 return -ENODEV;
766 789
790 if (intel_pstate_msrs_not_valid())
791 return -ENODEV;
792
767 pr_info("Intel P-state driver initializing.\n"); 793 pr_info("Intel P-state driver initializing.\n");
768 794
769 all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus()); 795 all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
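
intel_pstate_msrs_not_valid() above refuses to load the driver when the pstate fields read back as zero or when APERF/MPERF fail to advance between two reads, as can happen on some virtualized CPUs. A hedged userspace analogue using the msr character device; this assumes root and a loaded msr module, and relies on the architectural addresses 0xe7/0xe8 for IA32_MPERF/IA32_APERF:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_IA32_MPERF 0xe7
    #define MSR_IA32_APERF 0xe8

    static int rdmsr(int fd, uint32_t msr, uint64_t *val)
    {
            /* /dev/cpu/N/msr reads the MSR at the file offset. */
            return pread(fd, val, sizeof(*val), msr) == sizeof(*val) ? 0 : -1;
    }

    int main(void)
    {
            uint64_t aperf, mperf, tmp;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0)
                    return 1;         /* needs root and the msr module */

            if (rdmsr(fd, MSR_IA32_APERF, &aperf) ||
                rdmsr(fd, MSR_IA32_MPERF, &mperf))
                    return 1;

            /* The counters must move between reads, as in the driver. */
            if (rdmsr(fd, MSR_IA32_APERF, &tmp) || tmp == aperf)
                    puts("APERF not advancing: driver would return -ENODEV");
            if (rdmsr(fd, MSR_IA32_MPERF, &tmp) || tmp == mperf)
                    puts("MPERF not advancing: driver would return -ENODEV");

            close(fd);
            return 0;
    }
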
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b2a0a0726a54..cf268b14ae9a 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1650,11 +1650,7 @@ struct caam_alg_template {
1650}; 1650};
1651 1651
1652static struct caam_alg_template driver_algs[] = { 1652static struct caam_alg_template driver_algs[] = {
1653 /* 1653 /* single-pass ipsec_esp descriptor */
1654 * single-pass ipsec_esp descriptor
1655 * authencesn(*,*) is also registered, although not present
1656 * explicitly here.
1657 */
1658 { 1654 {
1659 .name = "authenc(hmac(md5),cbc(aes))", 1655 .name = "authenc(hmac(md5),cbc(aes))",
1660 .driver_name = "authenc-hmac-md5-cbc-aes-caam", 1656 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
@@ -2217,9 +2213,7 @@ static int __init caam_algapi_init(void)
2217 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2213 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2218 /* TODO: check if h/w supports alg */ 2214 /* TODO: check if h/w supports alg */
2219 struct caam_crypto_alg *t_alg; 2215 struct caam_crypto_alg *t_alg;
2220 bool done = false;
2221 2216
2222authencesn:
2223 t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); 2217 t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
2224 if (IS_ERR(t_alg)) { 2218 if (IS_ERR(t_alg)) {
2225 err = PTR_ERR(t_alg); 2219 err = PTR_ERR(t_alg);
@@ -2233,25 +2227,8 @@ authencesn:
2233 dev_warn(ctrldev, "%s alg registration failed\n", 2227 dev_warn(ctrldev, "%s alg registration failed\n",
2234 t_alg->crypto_alg.cra_driver_name); 2228 t_alg->crypto_alg.cra_driver_name);
2235 kfree(t_alg); 2229 kfree(t_alg);
2236 } else { 2230 } else
2237 list_add_tail(&t_alg->entry, &priv->alg_list); 2231 list_add_tail(&t_alg->entry, &priv->alg_list);
2238 if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD &&
2239 !memcmp(driver_algs[i].name, "authenc", 7) &&
2240 !done) {
2241 char *name;
2242
2243 name = driver_algs[i].name;
2244 memmove(name + 10, name + 7, strlen(name) - 7);
2245 memcpy(name + 7, "esn", 3);
2246
2247 name = driver_algs[i].driver_name;
2248 memmove(name + 10, name + 7, strlen(name) - 7);
2249 memcpy(name + 7, "esn", 3);
2250
2251 done = true;
2252 goto authencesn;
2253 }
2254 }
2255 } 2232 }
2256 if (!list_empty(&priv->alg_list)) 2233 if (!list_empty(&priv->alg_list))
2257 dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", 2234 dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index cf15e7813801..762aeff626ac 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -23,7 +23,6 @@
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/debugfs.h> 24#include <linux/debugfs.h>
25#include <linux/circ_buf.h> 25#include <linux/circ_buf.h>
26#include <linux/string.h>
27#include <net/xfrm.h> 26#include <net/xfrm.h>
28 27
29#include <crypto/algapi.h> 28#include <crypto/algapi.h>
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 09b184adf31b..5b2b5e61e4f9 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -38,7 +38,6 @@
38#include <linux/spinlock.h> 38#include <linux/spinlock.h>
39#include <linux/rtnetlink.h> 39#include <linux/rtnetlink.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/string.h>
42 41
43#include <crypto/algapi.h> 42#include <crypto/algapi.h>
44#include <crypto/aes.h> 43#include <crypto/aes.h>
@@ -1974,11 +1973,7 @@ struct talitos_alg_template {
1974}; 1973};
1975 1974
1976static struct talitos_alg_template driver_algs[] = { 1975static struct talitos_alg_template driver_algs[] = {
1977 /* 1976 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
1978 * AEAD algorithms. These use a single-pass ipsec_esp descriptor.
1979 * authencesn(*,*) is also registered, although not present
1980 * explicitly here.
1981 */
1982 { .type = CRYPTO_ALG_TYPE_AEAD, 1977 { .type = CRYPTO_ALG_TYPE_AEAD,
1983 .alg.crypto = { 1978 .alg.crypto = {
1984 .cra_name = "authenc(hmac(sha1),cbc(aes))", 1979 .cra_name = "authenc(hmac(sha1),cbc(aes))",
@@ -2820,9 +2815,7 @@ static int talitos_probe(struct platform_device *ofdev)
2820 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { 2815 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
2821 struct talitos_crypto_alg *t_alg; 2816 struct talitos_crypto_alg *t_alg;
2822 char *name = NULL; 2817 char *name = NULL;
2823 bool authenc = false;
2824 2818
2825authencesn:
2826 t_alg = talitos_alg_alloc(dev, &driver_algs[i]); 2819 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
2827 if (IS_ERR(t_alg)) { 2820 if (IS_ERR(t_alg)) {
2828 err = PTR_ERR(t_alg); 2821 err = PTR_ERR(t_alg);
@@ -2837,8 +2830,6 @@ authencesn:
2837 err = crypto_register_alg( 2830 err = crypto_register_alg(
2838 &t_alg->algt.alg.crypto); 2831 &t_alg->algt.alg.crypto);
2839 name = t_alg->algt.alg.crypto.cra_driver_name; 2832 name = t_alg->algt.alg.crypto.cra_driver_name;
2840 authenc = authenc ? !authenc :
2841 !(bool)memcmp(name, "authenc", 7);
2842 break; 2833 break;
2843 case CRYPTO_ALG_TYPE_AHASH: 2834 case CRYPTO_ALG_TYPE_AHASH:
2844 err = crypto_register_ahash( 2835 err = crypto_register_ahash(
@@ -2851,25 +2842,8 @@ authencesn:
2851 dev_err(dev, "%s alg registration failed\n", 2842 dev_err(dev, "%s alg registration failed\n",
2852 name); 2843 name);
2853 kfree(t_alg); 2844 kfree(t_alg);
2854 } else { 2845 } else
2855 list_add_tail(&t_alg->entry, &priv->alg_list); 2846 list_add_tail(&t_alg->entry, &priv->alg_list);
2856 if (authenc) {
2857 struct crypto_alg *alg =
2858 &driver_algs[i].alg.crypto;
2859
2860 name = alg->cra_name;
2861 memmove(name + 10, name + 7,
2862 strlen(name) - 7);
2863 memcpy(name + 7, "esn", 3);
2864
2865 name = alg->cra_driver_name;
2866 memmove(name + 10, name + 7,
2867 strlen(name) - 7);
2868 memcpy(name + 7, "esn", 3);
2869
2870 goto authencesn;
2871 }
2872 }
2873 } 2847 }
2874 } 2848 }
2875 if (!list_empty(&priv->alg_list)) 2849 if (!list_empty(&priv->alg_list))
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index c599558faeda..43a5329d4483 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1001,6 +1001,13 @@ static inline void convert_burst(u32 *maxburst)
1001 *maxburst = 0; 1001 *maxburst = 0;
1002} 1002}
1003 1003
1004static inline void convert_slave_id(struct dw_dma_chan *dwc)
1005{
1006 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1007
1008 dwc->dma_sconfig.slave_id -= dw->request_line_base;
1009}
1010
1004static int 1011static int
1005set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) 1012set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
1006{ 1013{
@@ -1015,6 +1022,7 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
1015 1022
1016 convert_burst(&dwc->dma_sconfig.src_maxburst); 1023 convert_burst(&dwc->dma_sconfig.src_maxburst);
1017 convert_burst(&dwc->dma_sconfig.dst_maxburst); 1024 convert_burst(&dwc->dma_sconfig.dst_maxburst);
1025 convert_slave_id(dwc);
1018 1026
1019 return 0; 1027 return 0;
1020} 1028}
@@ -1276,9 +1284,9 @@ static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
1276 if (dma_spec->args_count != 3) 1284 if (dma_spec->args_count != 3)
1277 return NULL; 1285 return NULL;
1278 1286
1279 fargs.req = be32_to_cpup(dma_spec->args+0); 1287 fargs.req = dma_spec->args[0];
1280 fargs.src = be32_to_cpup(dma_spec->args+1); 1288 fargs.src = dma_spec->args[1];
1281 fargs.dst = be32_to_cpup(dma_spec->args+2); 1289 fargs.dst = dma_spec->args[2];
1282 1290
1283 if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS || 1291 if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
1284 fargs.src >= dw->nr_masters || 1292 fargs.src >= dw->nr_masters ||
@@ -1628,6 +1636,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
1628 1636
1629static int dw_probe(struct platform_device *pdev) 1637static int dw_probe(struct platform_device *pdev)
1630{ 1638{
1639 const struct platform_device_id *match;
1631 struct dw_dma_platform_data *pdata; 1640 struct dw_dma_platform_data *pdata;
1632 struct resource *io; 1641 struct resource *io;
1633 struct dw_dma *dw; 1642 struct dw_dma *dw;
@@ -1711,6 +1720,11 @@ static int dw_probe(struct platform_device *pdev)
1711 memcpy(dw->data_width, pdata->data_width, 4); 1720 memcpy(dw->data_width, pdata->data_width, 4);
1712 } 1721 }
1713 1722
1723 /* Get the base request line if set */
1724 match = platform_get_device_id(pdev);
1725 if (match)
1726 dw->request_line_base = (unsigned int)match->driver_data;
1727
1714 /* Calculate all channel mask before DMA setup */ 1728 /* Calculate all channel mask before DMA setup */
1715 dw->all_chan_mask = (1 << nr_channels) - 1; 1729 dw->all_chan_mask = (1 << nr_channels) - 1;
1716 1730
@@ -1906,7 +1920,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table);
1906#endif 1920#endif
1907 1921
1908static const struct platform_device_id dw_dma_ids[] = { 1922static const struct platform_device_id dw_dma_ids[] = {
1909 { "INTL9C60", 0 }, 1923 /* Name, Request Line Base */
1924 { "INTL9C60", (kernel_ulong_t)16 },
1910 { } 1925 { }
1911}; 1926};
1912 1927
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index cf0ce5c77d60..4d02c3669b75 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -247,6 +247,7 @@ struct dw_dma {
247 /* hardware configuration */ 247 /* hardware configuration */
248 unsigned char nr_masters; 248 unsigned char nr_masters;
249 unsigned char data_width[4]; 249 unsigned char data_width[4];
250 unsigned int request_line_base;
250 251
251 struct dw_dma_chan chan[0]; 252 struct dw_dma_chan chan[0];
252}; 253};
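
With the new request_line_base field, taken from the platform_device_id driver_data (16 for INTL9C60), convert_slave_id() translates platform-global request lines into controller-local ones by simple subtraction. A small sketch of the remapping, with simplified structs rather than the driver's real ones:

    #include <assert.h>

    struct dw_dma { unsigned int request_line_base; };
    struct dma_slave_config { unsigned int slave_id; };

    /* Translate a platform-global request line to a controller-local one. */
    static void convert_slave_id(const struct dw_dma *dw,
                                 struct dma_slave_config *cfg)
    {
            cfg->slave_id -= dw->request_line_base;
    }

    int main(void)
    {
            struct dw_dma dw = { .request_line_base = 16 };  /* INTL9C60 */
            struct dma_slave_config cfg = { .slave_id = 23 };

            convert_slave_id(&dw, &cfg);
            assert(cfg.slave_id == 7);   /* local request line 7 */
            return 0;
    }
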
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 910b0116c128..e1d13c463c90 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2048,12 +2048,18 @@ static int init_csrows(struct mem_ctl_info *mci)
2048 edac_dbg(1, "MC node: %d, csrow: %d\n", 2048 edac_dbg(1, "MC node: %d, csrow: %d\n",
2049 pvt->mc_node_id, i); 2049 pvt->mc_node_id, i);
2050 2050
2051 if (row_dct0) 2051 if (row_dct0) {
2052 nr_pages = amd64_csrow_nr_pages(pvt, 0, i); 2052 nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
2053 csrow->channels[0]->dimm->nr_pages = nr_pages;
2054 }
2053 2055
2054 /* K8 has only one DCT */ 2056 /* K8 has only one DCT */
2055 if (boot_cpu_data.x86 != 0xf && row_dct1) 2057 if (boot_cpu_data.x86 != 0xf && row_dct1) {
2056 nr_pages += amd64_csrow_nr_pages(pvt, 1, i); 2058 int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i);
2059
2060 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
2061 nr_pages += row_dct1_pages;
2062 }
2057 2063
2058 mtype = amd64_determine_memory_type(pvt, i); 2064 mtype = amd64_determine_memory_type(pvt, i);
2059 2065
@@ -2072,9 +2078,7 @@ static int init_csrows(struct mem_ctl_info *mci)
2072 dimm = csrow->channels[j]->dimm; 2078 dimm = csrow->channels[j]->dimm;
2073 dimm->mtype = mtype; 2079 dimm->mtype = mtype;
2074 dimm->edac_mode = edac_mode; 2080 dimm->edac_mode = edac_mode;
2075 dimm->nr_pages = nr_pages;
2076 } 2081 }
2077 csrow->nr_pages = nr_pages;
2078 } 2082 }
2079 2083
2080 return empty; 2084 return empty;
@@ -2419,7 +2423,6 @@ static int amd64_init_one_instance(struct pci_dev *F2)
2419 2423
2420 mci->pvt_info = pvt; 2424 mci->pvt_info = pvt;
2421 mci->pdev = &pvt->F2->dev; 2425 mci->pdev = &pvt->F2->dev;
2422 mci->csbased = 1;
2423 2426
2424 setup_mci_misc_attrs(mci, fam_type); 2427 setup_mci_misc_attrs(mci, fam_type);
2425 2428
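
The init_csrows() change above attributes pages to each channel's DIMM (DCT0 pages to channel 0, DCT1 pages to channel 1) rather than storing one summed nr_pages in every DIMM of the row. Schematically, with two channels:

    #include <stdio.h>

    struct dimm { unsigned long nr_pages; };

    int main(void)
    {
            struct dimm chan[2] = { { 0 }, { 0 } };
            unsigned long dct0_pages = 1024, dct1_pages = 2048;

            /* Per-channel attribution; the row total is derived by summing. */
            chan[0].nr_pages = dct0_pages;
            chan[1].nr_pages = dct1_pages;

            unsigned long total = chan[0].nr_pages + chan[1].nr_pages;

            printf("csrow total: %lu pages\n", total);   /* 3072 */
            return 0;
    }
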
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index cdb81aa73ab7..27e86d938262 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -86,7 +86,7 @@ static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
86 edac_dimm_info_location(dimm, location, sizeof(location)); 86 edac_dimm_info_location(dimm, location, sizeof(location));
87 87
88 edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n", 88 edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
89 dimm->mci->mem_is_per_rank ? "rank" : "dimm", 89 dimm->mci->csbased ? "rank" : "dimm",
90 number, location, dimm->csrow, dimm->cschannel); 90 number, location, dimm->csrow, dimm->cschannel);
91 edac_dbg(4, " dimm = %p\n", dimm); 91 edac_dbg(4, " dimm = %p\n", dimm);
92 edac_dbg(4, " dimm->label = '%s'\n", dimm->label); 92 edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
@@ -341,7 +341,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
341 memcpy(mci->layers, layers, sizeof(*layer) * n_layers); 341 memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
342 mci->nr_csrows = tot_csrows; 342 mci->nr_csrows = tot_csrows;
343 mci->num_cschannel = tot_channels; 343 mci->num_cschannel = tot_channels;
344 mci->mem_is_per_rank = per_rank; 344 mci->csbased = per_rank;
345 345
346 /* 346 /*
347 * Allocate and fill the csrow/channels structs 347 * Allocate and fill the csrow/channels structs
@@ -1235,7 +1235,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
1235 * incrementing the compat API counters 1235 * incrementing the compat API counters
1236 */ 1236 */
1237 edac_dbg(4, "%s csrows map: (%d,%d)\n", 1237 edac_dbg(4, "%s csrows map: (%d,%d)\n",
1238 mci->mem_is_per_rank ? "rank" : "dimm", 1238 mci->csbased ? "rank" : "dimm",
1239 dimm->csrow, dimm->cschannel); 1239 dimm->csrow, dimm->cschannel);
1240 if (row == -1) 1240 if (row == -1)
1241 row = dimm->csrow; 1241 row = dimm->csrow;
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 4f4b6137d74e..5899a76eec3b 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -143,7 +143,7 @@ static const char *edac_caps[] = {
143 * and the per-dimm/per-rank one 143 * and the per-dimm/per-rank one
144 */ 144 */
145#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \ 145#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
146 struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) 146 static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
147 147
148struct dev_ch_attribute { 148struct dev_ch_attribute {
149 struct device_attribute attr; 149 struct device_attribute attr;
@@ -180,9 +180,6 @@ static ssize_t csrow_size_show(struct device *dev,
180 int i; 180 int i;
181 u32 nr_pages = 0; 181 u32 nr_pages = 0;
182 182
183 if (csrow->mci->csbased)
184 return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
185
186 for (i = 0; i < csrow->nr_channels; i++) 183 for (i = 0; i < csrow->nr_channels; i++)
187 nr_pages += csrow->channels[i]->dimm->nr_pages; 184 nr_pages += csrow->channels[i]->dimm->nr_pages;
188 return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); 185 return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
@@ -612,7 +609,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
612 device_initialize(&dimm->dev); 609 device_initialize(&dimm->dev);
613 610
614 dimm->dev.parent = &mci->dev; 611 dimm->dev.parent = &mci->dev;
615 if (mci->mem_is_per_rank) 612 if (mci->csbased)
616 dev_set_name(&dimm->dev, "rank%d", index); 613 dev_set_name(&dimm->dev, "rank%d", index);
617 else 614 else
618 dev_set_name(&dimm->dev, "dimm%d", index); 615 dev_set_name(&dimm->dev, "dimm%d", index);
@@ -778,14 +775,10 @@ static ssize_t mci_size_mb_show(struct device *dev,
778 for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { 775 for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
779 struct csrow_info *csrow = mci->csrows[csrow_idx]; 776 struct csrow_info *csrow = mci->csrows[csrow_idx];
780 777
781 if (csrow->mci->csbased) { 778 for (j = 0; j < csrow->nr_channels; j++) {
782 total_pages += csrow->nr_pages; 779 struct dimm_info *dimm = csrow->channels[j]->dimm;
783 } else {
784 for (j = 0; j < csrow->nr_channels; j++) {
785 struct dimm_info *dimm = csrow->channels[j]->dimm;
786 780
787 total_pages += dimm->nr_pages; 781 total_pages += dimm->nr_pages;
788 }
789 } 782 }
790 } 783 }
791 784
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index b70e3815c459..8f3c947b0029 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -32,6 +32,38 @@
32#define DEV_NAME "max77693-muic" 32#define DEV_NAME "max77693-muic"
33#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */ 33#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
34 34
35/*
36 * Default values for the MAX77693 registers needed to bring up the MUIC
37 * device. If the user does not supply initial values through platform data,
38 * the extcon-max77693 driver uses 'default_init_data' to bring up basic
39 * operation of the MAX77693 MUIC device.
40 */
41struct max77693_reg_data default_init_data[] = {
42 {
43 /* STATUS2 - [3]ChgDetRun */
44 .addr = MAX77693_MUIC_REG_STATUS2,
45 .data = STATUS2_CHGDETRUN_MASK,
46 }, {
47 /* INTMASK1 - Unmask [3]ADC1KM,[0]ADCM */
48 .addr = MAX77693_MUIC_REG_INTMASK1,
49 .data = INTMASK1_ADC1K_MASK
50 | INTMASK1_ADC_MASK,
51 }, {
52 /* INTMASK2 - Unmask [0]ChgTypM */
53 .addr = MAX77693_MUIC_REG_INTMASK2,
54 .data = INTMASK2_CHGTYP_MASK,
55 }, {
56 /* INTMASK3 - Mask all of interrupts */
57 .addr = MAX77693_MUIC_REG_INTMASK3,
58 .data = 0x0,
59 }, {
60 /* CDETCTRL2 */
61 .addr = MAX77693_MUIC_REG_CDETCTRL2,
62 .data = CDETCTRL2_VIDRMEN_MASK
63 | CDETCTRL2_DXOVPEN_MASK,
64 },
65};
66
35enum max77693_muic_adc_debounce_time { 67enum max77693_muic_adc_debounce_time {
36 ADC_DEBOUNCE_TIME_5MS = 0, 68 ADC_DEBOUNCE_TIME_5MS = 0,
37 ADC_DEBOUNCE_TIME_10MS, 69 ADC_DEBOUNCE_TIME_10MS,
@@ -1045,8 +1077,9 @@ static int max77693_muic_probe(struct platform_device *pdev)
1045{ 1077{
1046 struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent); 1078 struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent);
1047 struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev); 1079 struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev);
1048 struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
1049 struct max77693_muic_info *info; 1080 struct max77693_muic_info *info;
1081 struct max77693_reg_data *init_data;
1082 int num_init_data;
1050 int delay_jiffies; 1083 int delay_jiffies;
1051 int ret; 1084 int ret;
1052 int i; 1085 int i;
@@ -1145,15 +1178,25 @@ static int max77693_muic_probe(struct platform_device *pdev)
1145 goto err_irq; 1178 goto err_irq;
1146 } 1179 }
1147 1180
1148 /* Initialize MUIC register by using platform data */ 1181
1149 for (i = 0 ; i < muic_pdata->num_init_data ; i++) { 1182 /* Initialize MUIC register by using platform data or default data */
1150 enum max77693_irq_source irq_src = MAX77693_IRQ_GROUP_NR; 1183 if (pdata->muic_data) {
1184 init_data = pdata->muic_data->init_data;
1185 num_init_data = pdata->muic_data->num_init_data;
1186 } else {
1187 init_data = default_init_data;
1188 num_init_data = ARRAY_SIZE(default_init_data);
1189 }
1190
1191 for (i = 0 ; i < num_init_data ; i++) {
1192 enum max77693_irq_source irq_src
1193 = MAX77693_IRQ_GROUP_NR;
1151 1194
1152 max77693_write_reg(info->max77693->regmap_muic, 1195 max77693_write_reg(info->max77693->regmap_muic,
1153 muic_pdata->init_data[i].addr, 1196 init_data[i].addr,
1154 muic_pdata->init_data[i].data); 1197 init_data[i].data);
1155 1198
1156 switch (muic_pdata->init_data[i].addr) { 1199 switch (init_data[i].addr) {
1157 case MAX77693_MUIC_REG_INTMASK1: 1200 case MAX77693_MUIC_REG_INTMASK1:
1158 irq_src = MUIC_INT1; 1201 irq_src = MUIC_INT1;
1159 break; 1202 break;
@@ -1167,22 +1210,40 @@ static int max77693_muic_probe(struct platform_device *pdev)
1167 1210
1168 if (irq_src < MAX77693_IRQ_GROUP_NR) 1211 if (irq_src < MAX77693_IRQ_GROUP_NR)
1169 info->max77693->irq_masks_cur[irq_src] 1212 info->max77693->irq_masks_cur[irq_src]
1170 = muic_pdata->init_data[i].data; 1213 = init_data[i].data;
1171 } 1214 }
1172 1215
1173 /* 1216 if (pdata->muic_data) {
1174 * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB 1217 struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
1175 * h/w path of COMP2/COMN1 on CONTROL1 register.
1176 */
1177 if (muic_pdata->path_uart)
1178 info->path_uart = muic_pdata->path_uart;
1179 else
1180 info->path_uart = CONTROL1_SW_UART;
1181 1218
1182 if (muic_pdata->path_usb) 1219 /*
1183 info->path_usb = muic_pdata->path_usb; 1220 * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
1184 else 1221 * h/w path of COMP2/COMN1 on CONTROL1 register.
1222 */
1223 if (muic_pdata->path_uart)
1224 info->path_uart = muic_pdata->path_uart;
1225 else
1226 info->path_uart = CONTROL1_SW_UART;
1227
1228 if (muic_pdata->path_usb)
1229 info->path_usb = muic_pdata->path_usb;
1230 else
1231 info->path_usb = CONTROL1_SW_USB;
1232
1233 /*
1234 * Default delay time before the initial
1235 * cable-state detection runs.
1236 */
1237 if (muic_pdata->detcable_delay_ms)
1238 delay_jiffies =
1239 msecs_to_jiffies(muic_pdata->detcable_delay_ms);
1240 else
1241 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
1242 } else {
1185 info->path_usb = CONTROL1_SW_USB; 1243 info->path_usb = CONTROL1_SW_USB;
1244 info->path_uart = CONTROL1_SW_UART;
1245 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
1246 }
1186 1247
1187 /* Set initial path for UART */ 1248 /* Set initial path for UART */
1188 max77693_muic_set_path(info, info->path_uart, true); 1249 max77693_muic_set_path(info, info->path_uart, true);
@@ -1208,10 +1269,6 @@ static int max77693_muic_probe(struct platform_device *pdev)
1208 * driver should notify cable state to upper layer. 1269 * driver should notify cable state to upper layer.
1209 */ 1270 */
1210 INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq); 1271 INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq);
1211 if (muic_pdata->detcable_delay_ms)
1212 delay_jiffies = msecs_to_jiffies(muic_pdata->detcable_delay_ms);
1213 else
1214 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
1215 schedule_delayed_work(&info->wq_detcable, delay_jiffies); 1272 schedule_delayed_work(&info->wq_detcable, delay_jiffies);
1216 1273
1217 return ret; 1274 return ret;
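
The probe rework above falls back to the built-in default_init_data table when no platform data is supplied, instead of dereferencing pdata->muic_data unconditionally. A compact sketch of that select-then-apply fallback (illustrative register/value pairs, not the MAX77693's):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct reg_data { int addr; int data; };
    struct muic_pdata { const struct reg_data *init_data; int num_init_data; };

    static const struct reg_data default_init_data[] = {
            { 0x07, 0x08 },
            { 0x01, 0x09 },
    };

    int main(void)
    {
            const struct muic_pdata *pdata = NULL;   /* no platform data */
            const struct reg_data *init;
            int n;

            if (pdata) {
                    init = pdata->init_data;
                    n = pdata->num_init_data;
            } else {
                    init = default_init_data;
                    n = ARRAY_SIZE(default_init_data);
            }

            for (int i = 0; i < n; i++)
                    printf("write reg 0x%02x = 0x%02x\n",
                           init[i].addr, init[i].data);
            return 0;
    }
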
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index e636d950ad6c..69641bcae325 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -712,29 +712,45 @@ static int max8997_muic_probe(struct platform_device *pdev)
712 goto err_irq; 712 goto err_irq;
713 } 713 }
714 714
715 /* Initialize registers according to platform data */
716 if (pdata->muic_pdata) { 715 if (pdata->muic_pdata) {
717 struct max8997_muic_platform_data *mdata = info->muic_pdata; 716 struct max8997_muic_platform_data *muic_pdata
718 717 = pdata->muic_pdata;
719 for (i = 0; i < mdata->num_init_data; i++) { 718
720 max8997_write_reg(info->muic, mdata->init_data[i].addr, 719 /* Initialize registers according to platform data */
721 mdata->init_data[i].data); 720 for (i = 0; i < muic_pdata->num_init_data; i++) {
721 max8997_write_reg(info->muic,
722 muic_pdata->init_data[i].addr,
723 muic_pdata->init_data[i].data);
722 } 724 }
723 }
724 725
725 /* 726 /*
726 * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB 727 * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
727 * h/w path of COMP2/COMN1 on CONTROL1 register. 728 * h/w path of COMP2/COMN1 on CONTROL1 register.
728 */ 729 */
729 if (pdata->muic_pdata->path_uart) 730 if (muic_pdata->path_uart)
730 info->path_uart = pdata->muic_pdata->path_uart; 731 info->path_uart = muic_pdata->path_uart;
731 else 732 else
732 info->path_uart = CONTROL1_SW_UART; 733 info->path_uart = CONTROL1_SW_UART;
733 734
734 if (pdata->muic_pdata->path_usb) 735 if (muic_pdata->path_usb)
735 info->path_usb = pdata->muic_pdata->path_usb; 736 info->path_usb = muic_pdata->path_usb;
736 else 737 else
738 info->path_usb = CONTROL1_SW_USB;
739
740 /*
741 * Default delay time before the initial
742 * cable-state detection runs.
743 */
744 if (muic_pdata->detcable_delay_ms)
745 delay_jiffies =
746 msecs_to_jiffies(muic_pdata->detcable_delay_ms);
747 else
748 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
749 } else {
750 info->path_uart = CONTROL1_SW_UART;
737 info->path_usb = CONTROL1_SW_USB; 751 info->path_usb = CONTROL1_SW_USB;
752 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
753 }
738 754
739 /* Set initial path for UART */ 755 /* Set initial path for UART */
740 max8997_muic_set_path(info, info->path_uart, true); 756 max8997_muic_set_path(info, info->path_uart, true);
@@ -751,10 +767,6 @@ static int max8997_muic_probe(struct platform_device *pdev)
751 * driver should notify cable state to upper layer. 767 * driver should notify cable state to upper layer.
752 */ 768 */
753 INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq); 769 INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq);
754 if (pdata->muic_pdata->detcable_delay_ms)
755 delay_jiffies = msecs_to_jiffies(pdata->muic_pdata->detcable_delay_ms);
756 else
757 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
758 schedule_delayed_work(&info->wq_detcable, delay_jiffies); 770 schedule_delayed_work(&info->wq_detcable, delay_jiffies);
759 771
760 return 0; 772 return 0;
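
Both MUIC probes now compute the cable-detection delay the same way: take the platform-supplied detcable_delay_ms when it is non-zero, otherwise fall back to DELAY_MS_DEFAULT, converting to jiffies either way. A minimal userspace sketch of that fallback, assuming HZ=100 and a made-up default; msecs_to_jiffies() is modelled here as a plain rounding conversion, not the kernel's implementation:

#include <stdio.h>

#define HZ               100
#define DELAY_MS_DEFAULT 170UL  /* placeholder; each driver defines its own */

/* Crude stand-in for the kernel's msecs_to_jiffies(): round up. */
static unsigned long msecs_to_jiffies(unsigned long ms)
{
	return (ms * HZ + 999) / 1000;
}

/* The fallback both probes use: platform delay if given, else the default. */
static unsigned long pick_detcable_delay(unsigned long detcable_delay_ms)
{
	if (detcable_delay_ms)
		return msecs_to_jiffies(detcable_delay_ms);
	return msecs_to_jiffies(DELAY_MS_DEFAULT);
}

int main(void)
{
	printf("no pdata delay -> %lu jiffies\n", pick_detcable_delay(0));
	printf("500 ms         -> %lu jiffies\n", pick_detcable_delay(500));
	return 0;
}
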
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 9b00072a020f..42c759a4d047 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -53,6 +53,24 @@ config EFI_VARS
53 Subsequent efibootmgr releases may be found at: 53 Subsequent efibootmgr releases may be found at:
54 <http://linux.dell.com/efibootmgr> 54 <http://linux.dell.com/efibootmgr>
55 55
56config EFI_VARS_PSTORE
57 bool "Register efivars backend for pstore"
58 depends on EFI_VARS && PSTORE
59 default y
60 help
61	  Say Y here to enable the use of efivars as a backend to pstore. This
62 will allow writing console messages, crash dumps, or anything
63 else supported by pstore to EFI variables.
64
65config EFI_VARS_PSTORE_DEFAULT_DISABLE
66 bool "Disable using efivars as a pstore backend by default"
67 depends on EFI_VARS_PSTORE
68 default n
69 help
70 Saying Y here will disable the use of efivars as a storage
71 backend for pstore by default. This setting can be overridden
72 using the efivars module's pstore_disable parameter.
73
56config EFI_PCDP 74config EFI_PCDP
57 bool "Console device selection via EFI PCDP or HCDP table" 75 bool "Console device selection via EFI PCDP or HCDP table"
58 depends on ACPI && EFI && IA64 76 depends on ACPI && EFI && IA64
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index fe62aa392239..7acafb80fd4c 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -103,6 +103,11 @@ MODULE_VERSION(EFIVARS_VERSION);
103 */ 103 */
104#define GUID_LEN 36 104#define GUID_LEN 36
105 105
106static bool efivars_pstore_disable =
107 IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
108
109module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
110
106/* 111/*
107 * The maximum size of VariableName + Data = 1024 112 * The maximum size of VariableName + Data = 1024
108 * Therefore, it's reasonable to save that much 113 * Therefore, it's reasonable to save that much
@@ -165,6 +170,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
165 170
166static void efivar_update_sysfs_entries(struct work_struct *); 171static void efivar_update_sysfs_entries(struct work_struct *);
167static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries); 172static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
173static bool efivar_wq_enabled = true;
168 174
169/* Return the number of unicode characters in data */ 175/* Return the number of unicode characters in data */
170static unsigned long 176static unsigned long
@@ -1309,9 +1315,7 @@ static const struct inode_operations efivarfs_dir_inode_operations = {
1309 .create = efivarfs_create, 1315 .create = efivarfs_create,
1310}; 1316};
1311 1317
1312static struct pstore_info efi_pstore_info; 1318#ifdef CONFIG_EFI_VARS_PSTORE
1313
1314#ifdef CONFIG_PSTORE
1315 1319
1316static int efi_pstore_open(struct pstore_info *psi) 1320static int efi_pstore_open(struct pstore_info *psi)
1317{ 1321{
@@ -1441,7 +1445,7 @@ static int efi_pstore_write(enum pstore_type_id type,
1441 1445
1442 spin_unlock_irqrestore(&efivars->lock, flags); 1446 spin_unlock_irqrestore(&efivars->lock, flags);
1443 1447
1444 if (reason == KMSG_DUMP_OOPS) 1448 if (reason == KMSG_DUMP_OOPS && efivar_wq_enabled)
1445 schedule_work(&efivar_work); 1449 schedule_work(&efivar_work);
1446 1450
1447 *id = part; 1451 *id = part;
@@ -1514,38 +1518,6 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
1514 1518
1515 return 0; 1519 return 0;
1516} 1520}
1517#else
1518static int efi_pstore_open(struct pstore_info *psi)
1519{
1520 return 0;
1521}
1522
1523static int efi_pstore_close(struct pstore_info *psi)
1524{
1525 return 0;
1526}
1527
1528static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, int *count,
1529 struct timespec *timespec,
1530 char **buf, struct pstore_info *psi)
1531{
1532 return -1;
1533}
1534
1535static int efi_pstore_write(enum pstore_type_id type,
1536 enum kmsg_dump_reason reason, u64 *id,
1537 unsigned int part, int count, size_t size,
1538 struct pstore_info *psi)
1539{
1540 return 0;
1541}
1542
1543static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
1544 struct timespec time, struct pstore_info *psi)
1545{
1546 return 0;
1547}
1548#endif
1549 1521
1550static struct pstore_info efi_pstore_info = { 1522static struct pstore_info efi_pstore_info = {
1551 .owner = THIS_MODULE, 1523 .owner = THIS_MODULE,
@@ -1557,6 +1529,24 @@ static struct pstore_info efi_pstore_info = {
1557 .erase = efi_pstore_erase, 1529 .erase = efi_pstore_erase,
1558}; 1530};
1559 1531
1532static void efivar_pstore_register(struct efivars *efivars)
1533{
1534 efivars->efi_pstore_info = efi_pstore_info;
1535 efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
1536 if (efivars->efi_pstore_info.buf) {
1537 efivars->efi_pstore_info.bufsize = 1024;
1538 efivars->efi_pstore_info.data = efivars;
1539 spin_lock_init(&efivars->efi_pstore_info.buf_lock);
1540 pstore_register(&efivars->efi_pstore_info);
1541 }
1542}
1543#else
1544static void efivar_pstore_register(struct efivars *efivars)
1545{
1546 return;
1547}
1548#endif
1549
1560static ssize_t efivar_create(struct file *filp, struct kobject *kobj, 1550static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
1561 struct bin_attribute *bin_attr, 1551 struct bin_attribute *bin_attr,
1562 char *buf, loff_t pos, size_t count) 1552 char *buf, loff_t pos, size_t count)
@@ -1716,6 +1706,31 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
1716 return found; 1706 return found;
1717} 1707}
1718 1708
1709/*
1710 * Returns the size of variable_name, in bytes, including the
1711 * terminating NULL character, or variable_name_size if no NULL
1712 * character is found among the first variable_name_size bytes.
1713 */
1714static unsigned long var_name_strnsize(efi_char16_t *variable_name,
1715 unsigned long variable_name_size)
1716{
1717 unsigned long len;
1718 efi_char16_t c;
1719
1720 /*
1721 * The variable name is, by definition, a NULL-terminated
1722 * string, so make absolutely sure that variable_name_size is
1723 * the value we expect it to be. If not, return the real size.
1724 */
1725 for (len = 2; len <= variable_name_size; len += sizeof(c)) {
1726 c = variable_name[(len / sizeof(c)) - 1];
1727 if (!c)
1728 break;
1729 }
1730
1731 return min(len, variable_name_size);
1732}
1733
1719static void efivar_update_sysfs_entries(struct work_struct *work) 1734static void efivar_update_sysfs_entries(struct work_struct *work)
1720{ 1735{
1721 struct efivars *efivars = &__efivars; 1736 struct efivars *efivars = &__efivars;
@@ -1756,10 +1771,13 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
1756 if (!found) { 1771 if (!found) {
1757 kfree(variable_name); 1772 kfree(variable_name);
1758 break; 1773 break;
1759 } else 1774 } else {
1775 variable_name_size = var_name_strnsize(variable_name,
1776 variable_name_size);
1760 efivar_create_sysfs_entry(efivars, 1777 efivar_create_sysfs_entry(efivars,
1761 variable_name_size, 1778 variable_name_size,
1762 variable_name, &vendor); 1779 variable_name, &vendor);
1780 }
1763 } 1781 }
1764} 1782}
1765 1783
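
var_name_strnsize() above is essentially a bounded strnlen over UTF-16 data, counting bytes rather than characters and including the terminator, with the firmware-reported size as an upper bound. A runnable analogue, with uint16_t standing in for efi_char16_t and the example buffers invented for the demonstration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring var_name_strnsize(): bytes including the
 * terminating NUL, capped at the reported size when no NUL is found. */
static unsigned long name_strnsize(const uint16_t *name, unsigned long size)
{
	unsigned long len;

	for (len = 2; len <= size; len += sizeof(*name)) {
		if (!name[(len / sizeof(*name)) - 1])
			break;
	}
	return len < size ? len : size;   /* min(len, size) */
}

int main(void)
{
	uint16_t ok[]  = { 'B', 'o', 'o', 't', 0 };
	uint16_t bad[] = { 'B', 'o', 'o', 't' };   /* no terminator */

	printf("terminated:   %lu bytes\n", name_strnsize(ok, sizeof(ok)));
	printf("unterminated: %lu bytes\n", name_strnsize(bad, sizeof(bad)));
	return 0;
}
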
@@ -1958,6 +1976,35 @@ void unregister_efivars(struct efivars *efivars)
1958} 1976}
1959EXPORT_SYMBOL_GPL(unregister_efivars); 1977EXPORT_SYMBOL_GPL(unregister_efivars);
1960 1978
1979/*
1980 * Print a warning when duplicate EFI variables are encountered and
1981 * disable the sysfs workqueue since the firmware is buggy.
1982 */
1983static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
1984 unsigned long len16)
1985{
1986 size_t i, len8 = len16 / sizeof(efi_char16_t);
1987 char *s8;
1988
1989 /*
1990 * Disable the workqueue since the algorithm it uses for
1991 * detecting new variables won't work with this buggy
1992 * implementation of GetNextVariableName().
1993 */
1994 efivar_wq_enabled = false;
1995
1996 s8 = kzalloc(len8, GFP_KERNEL);
1997 if (!s8)
1998 return;
1999
2000 for (i = 0; i < len8; i++)
2001 s8[i] = s16[i];
2002
2003 printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
2004 s8, vendor_guid);
2005 kfree(s8);
2006}
2007
1961int register_efivars(struct efivars *efivars, 2008int register_efivars(struct efivars *efivars,
1962 const struct efivar_operations *ops, 2009 const struct efivar_operations *ops,
1963 struct kobject *parent_kobj) 2010 struct kobject *parent_kobj)
@@ -2006,6 +2053,24 @@ int register_efivars(struct efivars *efivars,
2006 &vendor_guid); 2053 &vendor_guid);
2007 switch (status) { 2054 switch (status) {
2008 case EFI_SUCCESS: 2055 case EFI_SUCCESS:
2056 variable_name_size = var_name_strnsize(variable_name,
2057 variable_name_size);
2058
2059 /*
2060 * Some firmware implementations return the
2061 * same variable name on multiple calls to
2062 * get_next_variable(). Terminate the loop
2063 * immediately as there is no guarantee that
2064 * we'll ever see a different variable name,
2065 * and may end up looping here forever.
2066 */
2067 if (variable_is_present(variable_name, &vendor_guid)) {
2068 dup_variable_bug(variable_name, &vendor_guid,
2069 variable_name_size);
2070 status = EFI_NOT_FOUND;
2071 break;
2072 }
2073
2009 efivar_create_sysfs_entry(efivars, 2074 efivar_create_sysfs_entry(efivars,
2010 variable_name_size, 2075 variable_name_size,
2011 variable_name, 2076 variable_name,
@@ -2025,15 +2090,8 @@ int register_efivars(struct efivars *efivars,
2025 if (error) 2090 if (error)
2026 unregister_efivars(efivars); 2091 unregister_efivars(efivars);
2027 2092
2028 efivars->efi_pstore_info = efi_pstore_info; 2093 if (!efivars_pstore_disable)
2029 2094 efivar_pstore_register(efivars);
2030 efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
2031 if (efivars->efi_pstore_info.buf) {
2032 efivars->efi_pstore_info.bufsize = 1024;
2033 efivars->efi_pstore_info.data = efivars;
2034 spin_lock_init(&efivars->efi_pstore_info.buf_lock);
2035 pstore_register(&efivars->efi_pstore_info);
2036 }
2037 2095
2038 register_filesystem(&efivarfs_type); 2096 register_filesystem(&efivarfs_type);
2039 2097
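
The register_efivars() loop above stops the moment GetNextVariableName() hands back a name that was already registered, since buggy firmware may repeat a name forever; dup_variable_bug() logs the offender and disables the sysfs workqueue. A toy model of the same stop-on-duplicate enumeration, using plain char strings and a deliberately buggy name generator; all names here are invented:

#include <stdio.h>
#include <string.h>

/* A "buggy firmware" that repeats the second name forever. */
static const char *buggy_next_name(int call)
{
	static const char *names[] = { "Boot0000", "Boot0001", "Boot0001" };
	return names[call < 2 ? call : 2];
}

int main(void)
{
	char seen[16][32];   /* a real implementation would bound this */
	int nseen = 0;

	for (int call = 0; ; call++) {
		const char *name = buggy_next_name(call);

		for (int i = 0; i < nseen; i++) {
			if (!strcmp(seen[i], name)) {
				fprintf(stderr,
					"duplicate variable: %s (stopping)\n",
					name);
				return 0;   /* terminate the enumeration */
			}
		}
		snprintf(seen[nseen++], sizeof(seen[0]), "%s", name);
		printf("registered %s\n", name);
	}
}
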
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index a71a54a3e3f7..5150df6cba08 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -193,7 +193,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
193 if (!np) 193 if (!np)
194 return; 194 return;
195 195
196 do { 196 for (;; index++) {
197 ret = of_parse_phandle_with_args(np, "gpio-ranges", 197 ret = of_parse_phandle_with_args(np, "gpio-ranges",
198 "#gpio-range-cells", index, &pinspec); 198 "#gpio-range-cells", index, &pinspec);
199 if (ret) 199 if (ret)
@@ -222,8 +222,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
222 222
223 if (ret) 223 if (ret)
224 break; 224 break;
225 225 }
226 } while (index++);
227} 226}
228 227
229#else 228#else
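
The gpiolib change above fixes a loop that could only ever run once: with index starting at 0, do { ... } while (index++) tests the pre-increment value, so the very first test is while (0) and the body is never repeated. The replacement for (;; index++) leaves termination entirely to the explicit breaks. A small program demonstrating the difference:

#include <stdio.h>

int main(void)
{
	int index = 0, passes = 0;

	do {
		passes++;
	} while (index++);          /* tests 0 -> false: one pass only */
	printf("do/while: %d pass(es)\n", passes);

	passes = 0;
	for (index = 0;; index++) { /* fixed form: breaks decide the exit */
		passes++;
		if (passes == 3)    /* stand-in for of_parse_phandle_with_args()
				     * returning an error */
			break;
	}
	printf("for(;;):  %d pass(es)\n", passes);
	return 0;
}
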
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c194f4e680ad..e2acfdbf7d3c 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1634,7 +1634,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
1634 unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; 1634 unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
1635 unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; 1635 unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
1636 unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; 1636 unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
1637 unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; 1637 unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
1638 unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); 1638 unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
1639 1639
1640 /* ignore tiny modes */ 1640 /* ignore tiny modes */
@@ -1715,6 +1715,7 @@ set_size:
1715 } 1715 }
1716 1716
1717 mode->type = DRM_MODE_TYPE_DRIVER; 1717 mode->type = DRM_MODE_TYPE_DRIVER;
1718 mode->vrefresh = drm_mode_vrefresh(mode);
1718 drm_mode_set_name(mode); 1719 drm_mode_set_name(mode);
1719 1720
1720 return mode; 1721 return mode;
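
The one-character drm_edid fix above flips a shift direction. In the EDID detailed timing descriptor the upper two bits of the vertical sync offset sit in bits 3:2 of hsync_vsync_offset_pulse_width_hi; to form the 6-bit offset they must move up to bits 5:4, i.e. a left shift by 2, above the four bits taken from vsync_offset_pulse_width_lo >> 4. A quick check of both forms with arbitrary byte values:

#include <stdio.h>

int main(void)
{
	unsigned hi = 0x0c;  /* bits 3:2 set -> upper offset bits = 0b11 */
	unsigned lo = 0xa0;  /* low 4 bits of the offset, in the upper nibble */

	unsigned wrong = (hi & 0xc) >> 2 | lo >> 4;  /* old code */
	unsigned right = (hi & 0xc) << 2 | lo >> 4;  /* fixed    */

	printf("wrong=0x%02x right=0x%02x\n", wrong, right); /* 0x0b vs 0x3a */
	return 0;
}
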
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 36493ce71f9a..98cc14725ba9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -38,11 +38,12 @@
38/* position control register for hardware window 0, 2 ~ 4.*/ 38/* position control register for hardware window 0, 2 ~ 4.*/
39#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16) 39#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
40#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16) 40#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16)
41/* size control register for hardware window 0. */ 41/*
42#define VIDOSD_C_SIZE_W0 (VIDOSD_BASE + 0x08) 42 * size control register for hardware window 0 and alpha control register
43/* alpha control register for hardware window 1 ~ 4. */ 43 * for hardware windows 1 ~ 4
44#define VIDOSD_C(win) (VIDOSD_BASE + 0x18 + (win) * 16) 44 */
45/* size control register for hardware window 1 ~ 4. */ 45#define VIDOSD_C(win) (VIDOSD_BASE + 0x08 + (win) * 16)
46/* size control register for hardware windows 1 ~ 2. */
46#define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16) 47#define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16)
47 48
48#define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8) 49#define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8)
@@ -50,9 +51,9 @@
50#define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4) 51#define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4)
51 52
52/* color key control register for hardware window 1 ~ 4. */ 53/* color key control register for hardware window 1 ~ 4. */
53#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + (x * 8)) 54#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + ((x - 1) * 8))
54/* color key value register for hardware window 1 ~ 4. */ 55/* color key value register for hardware window 1 ~ 4. */
55#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + (x * 8)) 56#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8))
56 57
57/* FIMD has a total of five hardware windows. */ 58/* FIMD has a total of five hardware windows. */
58#define WINDOWS_NR 5 59#define WINDOWS_NR 5
@@ -109,9 +110,9 @@ struct fimd_context {
109 110
110#ifdef CONFIG_OF 111#ifdef CONFIG_OF
111static const struct of_device_id fimd_driver_dt_match[] = { 112static const struct of_device_id fimd_driver_dt_match[] = {
112 { .compatible = "samsung,exynos4-fimd", 113 { .compatible = "samsung,exynos4210-fimd",
113 .data = &exynos4_fimd_driver_data }, 114 .data = &exynos4_fimd_driver_data },
114 { .compatible = "samsung,exynos5-fimd", 115 { .compatible = "samsung,exynos5250-fimd",
115 .data = &exynos5_fimd_driver_data }, 116 .data = &exynos5_fimd_driver_data },
116 {}, 117 {},
117}; 118};
@@ -581,7 +582,7 @@ static void fimd_win_commit(struct device *dev, int zpos)
581 if (win != 3 && win != 4) { 582 if (win != 3 && win != 4) {
582 u32 offset = VIDOSD_D(win); 583 u32 offset = VIDOSD_D(win);
583 if (win == 0) 584 if (win == 0)
584 offset = VIDOSD_C_SIZE_W0; 585 offset = VIDOSD_C(win);
585 val = win_data->ovl_width * win_data->ovl_height; 586 val = win_data->ovl_width * win_data->ovl_height;
586 writel(val, ctx->regs + offset); 587 writel(val, ctx->regs + offset);
587 588
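
The WKEYCON change above corrects an indexing assumption: the color-key registers exist only for hardware windows 1 ~ 4, so window x must select register slot (x - 1) rather than x. A tiny check of the fixed macro; the WKEYCON0 base used here is a placeholder, not the real register address:

#include <stdio.h>

#define WKEYCON0         0x0000   /* assumed base, for illustration only */
#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + ((x - 1) * 8))

int main(void)
{
	/* windows 1..4 now map to consecutive slots starting at +0x140 */
	for (int win = 1; win <= 4; win++)
		printf("win %d -> 0x%03x\n", win, WKEYCON0_BASE(win));
	return 0;
}
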
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3b0da0378acf..47a493c8a71f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -48,8 +48,14 @@
48 48
49/* registers for base address */ 49/* registers for base address */
50#define G2D_SRC_BASE_ADDR 0x0304 50#define G2D_SRC_BASE_ADDR 0x0304
51#define G2D_SRC_COLOR_MODE 0x030C
52#define G2D_SRC_LEFT_TOP 0x0310
53#define G2D_SRC_RIGHT_BOTTOM 0x0314
51#define G2D_SRC_PLANE2_BASE_ADDR 0x0318 54#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
52#define G2D_DST_BASE_ADDR 0x0404 55#define G2D_DST_BASE_ADDR 0x0404
56#define G2D_DST_COLOR_MODE 0x040C
57#define G2D_DST_LEFT_TOP 0x0410
58#define G2D_DST_RIGHT_BOTTOM 0x0414
53#define G2D_DST_PLANE2_BASE_ADDR 0x0418 59#define G2D_DST_PLANE2_BASE_ADDR 0x0418
54#define G2D_PAT_BASE_ADDR 0x0500 60#define G2D_PAT_BASE_ADDR 0x0500
55#define G2D_MSK_BASE_ADDR 0x0520 61#define G2D_MSK_BASE_ADDR 0x0520
@@ -82,7 +88,7 @@
82#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17 88#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
83 89
84/* G2D_DMA_HOLD_CMD */ 90/* G2D_DMA_HOLD_CMD */
85#define G2D_USET_HOLD (1 << 2) 91#define G2D_USER_HOLD (1 << 2)
86#define G2D_LIST_HOLD (1 << 1) 92#define G2D_LIST_HOLD (1 << 1)
87#define G2D_BITBLT_HOLD (1 << 0) 93#define G2D_BITBLT_HOLD (1 << 0)
88 94
@@ -91,13 +97,27 @@
91#define G2D_START_NHOLT (1 << 1) 97#define G2D_START_NHOLT (1 << 1)
92#define G2D_START_BITBLT (1 << 0) 98#define G2D_START_BITBLT (1 << 0)
93 99
100/* buffer color format */
101#define G2D_FMT_XRGB8888 0
102#define G2D_FMT_ARGB8888 1
103#define G2D_FMT_RGB565 2
104#define G2D_FMT_XRGB1555 3
105#define G2D_FMT_ARGB1555 4
106#define G2D_FMT_XRGB4444 5
107#define G2D_FMT_ARGB4444 6
108#define G2D_FMT_PACKED_RGB888 7
109#define G2D_FMT_A8 11
110#define G2D_FMT_L8 12
111
112/* buffer valid length */
113#define G2D_LEN_MIN 1
114#define G2D_LEN_MAX 8000
115
94#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4) 116#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
95#define G2D_CMDLIST_NUM 64 117#define G2D_CMDLIST_NUM 64
96#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) 118#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
97#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2) 119#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
98 120
99#define MAX_BUF_ADDR_NR 6
100
101/* maximum buffer pool size of userptr is 64MB as default */ 121/* maximum buffer pool size of userptr is 64MB as default */
102#define MAX_POOL (64 * 1024 * 1024) 122#define MAX_POOL (64 * 1024 * 1024)
103 123
@@ -106,6 +126,17 @@ enum {
106 BUF_TYPE_USERPTR, 126 BUF_TYPE_USERPTR,
107}; 127};
108 128
129enum g2d_reg_type {
130 REG_TYPE_NONE = -1,
131 REG_TYPE_SRC,
132 REG_TYPE_SRC_PLANE2,
133 REG_TYPE_DST,
134 REG_TYPE_DST_PLANE2,
135 REG_TYPE_PAT,
136 REG_TYPE_MSK,
137 MAX_REG_TYPE_NR
138};
139
109/* cmdlist data structure */ 140/* cmdlist data structure */
110struct g2d_cmdlist { 141struct g2d_cmdlist {
111 u32 head; 142 u32 head;
@@ -113,6 +144,42 @@ struct g2d_cmdlist {
113 u32 last; /* last data offset */ 144 u32 last; /* last data offset */
114}; 145};
115 146
147/*
148 * A structure of buffer description
149 *
150 * @format: color format
151 * @left_x: the x coordinate of the top-left corner
152 * @top_y: the y coordinate of the top-left corner
153 * @right_x: the x coordinate of the bottom-right corner
154 * @bottom_y: the y coordinate of the bottom-right corner
155 *
156 */
157struct g2d_buf_desc {
158 unsigned int format;
159 unsigned int left_x;
160 unsigned int top_y;
161 unsigned int right_x;
162 unsigned int bottom_y;
163};
164
165/*
166 * A structure of buffer information
167 *
168 * @map_nr: manages the number of mapped buffers
169 * @reg_types: stores register type in the order of the requested commands
170 * @handles: stores buffer handle in its reg_type position
171 * @types: stores buffer type in its reg_type position
172 * @descs: stores buffer description in its reg_type position
173 *
174 */
175struct g2d_buf_info {
176 unsigned int map_nr;
177 enum g2d_reg_type reg_types[MAX_REG_TYPE_NR];
178 unsigned long handles[MAX_REG_TYPE_NR];
179 unsigned int types[MAX_REG_TYPE_NR];
180 struct g2d_buf_desc descs[MAX_REG_TYPE_NR];
181};
182
116struct drm_exynos_pending_g2d_event { 183struct drm_exynos_pending_g2d_event {
117 struct drm_pending_event base; 184 struct drm_pending_event base;
118 struct drm_exynos_g2d_event event; 185 struct drm_exynos_g2d_event event;
@@ -131,14 +198,11 @@ struct g2d_cmdlist_userptr {
131 bool in_pool; 198 bool in_pool;
132 bool out_of_list; 199 bool out_of_list;
133}; 200};
134
135struct g2d_cmdlist_node { 201struct g2d_cmdlist_node {
136 struct list_head list; 202 struct list_head list;
137 struct g2d_cmdlist *cmdlist; 203 struct g2d_cmdlist *cmdlist;
138 unsigned int map_nr;
139 unsigned long handles[MAX_BUF_ADDR_NR];
140 unsigned int obj_type[MAX_BUF_ADDR_NR];
141 dma_addr_t dma_addr; 204 dma_addr_t dma_addr;
205 struct g2d_buf_info buf_info;
142 206
143 struct drm_exynos_pending_g2d_event *event; 207 struct drm_exynos_pending_g2d_event *event;
144}; 208};
@@ -188,6 +252,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
188 struct exynos_drm_subdrv *subdrv = &g2d->subdrv; 252 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
189 int nr; 253 int nr;
190 int ret; 254 int ret;
255 struct g2d_buf_info *buf_info;
191 256
192 init_dma_attrs(&g2d->cmdlist_dma_attrs); 257 init_dma_attrs(&g2d->cmdlist_dma_attrs);
193 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); 258 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
@@ -209,11 +274,17 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
209 } 274 }
210 275
211 for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) { 276 for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
277 unsigned int i;
278
212 node[nr].cmdlist = 279 node[nr].cmdlist =
213 g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE; 280 g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
214 node[nr].dma_addr = 281 node[nr].dma_addr =
215 g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE; 282 g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
216 283
284 buf_info = &node[nr].buf_info;
285 for (i = 0; i < MAX_REG_TYPE_NR; i++)
286 buf_info->reg_types[i] = REG_TYPE_NONE;
287
217 list_add_tail(&node[nr].list, &g2d->free_cmdlist); 288 list_add_tail(&node[nr].list, &g2d->free_cmdlist);
218 } 289 }
219 290
@@ -450,7 +521,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
450 DMA_BIDIRECTIONAL); 521 DMA_BIDIRECTIONAL);
451 if (ret < 0) { 522 if (ret < 0) {
452 DRM_ERROR("failed to map sgt with dma region.\n"); 523 DRM_ERROR("failed to map sgt with dma region.\n");
453 goto err_free_sgt; 524 goto err_sg_free_table;
454 } 525 }
455 526
456 g2d_userptr->dma_addr = sgt->sgl[0].dma_address; 527 g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
@@ -467,8 +538,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
467 538
468 return &g2d_userptr->dma_addr; 539 return &g2d_userptr->dma_addr;
469 540
470err_free_sgt: 541err_sg_free_table:
471 sg_free_table(sgt); 542 sg_free_table(sgt);
543
544err_free_sgt:
472 kfree(sgt); 545 kfree(sgt);
473 sgt = NULL; 546 sgt = NULL;
474 547
@@ -506,36 +579,172 @@ static void g2d_userptr_free_all(struct drm_device *drm_dev,
506 g2d->current_pool = 0; 579 g2d->current_pool = 0;
507} 580}
508 581
582static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
583{
584 enum g2d_reg_type reg_type;
585
586 switch (reg_offset) {
587 case G2D_SRC_BASE_ADDR:
588 case G2D_SRC_COLOR_MODE:
589 case G2D_SRC_LEFT_TOP:
590 case G2D_SRC_RIGHT_BOTTOM:
591 reg_type = REG_TYPE_SRC;
592 break;
593 case G2D_SRC_PLANE2_BASE_ADDR:
594 reg_type = REG_TYPE_SRC_PLANE2;
595 break;
596 case G2D_DST_BASE_ADDR:
597 case G2D_DST_COLOR_MODE:
598 case G2D_DST_LEFT_TOP:
599 case G2D_DST_RIGHT_BOTTOM:
600 reg_type = REG_TYPE_DST;
601 break;
602 case G2D_DST_PLANE2_BASE_ADDR:
603 reg_type = REG_TYPE_DST_PLANE2;
604 break;
605 case G2D_PAT_BASE_ADDR:
606 reg_type = REG_TYPE_PAT;
607 break;
608 case G2D_MSK_BASE_ADDR:
609 reg_type = REG_TYPE_MSK;
610 break;
611 default:
612 reg_type = REG_TYPE_NONE;
613 DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
614 break;
615 }
616
617 return reg_type;
618}
619
620static unsigned long g2d_get_buf_bpp(unsigned int format)
621{
622 unsigned long bpp;
623
624 switch (format) {
625 case G2D_FMT_XRGB8888:
626 case G2D_FMT_ARGB8888:
627 bpp = 4;
628 break;
629 case G2D_FMT_RGB565:
630 case G2D_FMT_XRGB1555:
631 case G2D_FMT_ARGB1555:
632 case G2D_FMT_XRGB4444:
633 case G2D_FMT_ARGB4444:
634 bpp = 2;
635 break;
636 case G2D_FMT_PACKED_RGB888:
637 bpp = 3;
638 break;
639 default:
640 bpp = 1;
641 break;
642 }
643
644 return bpp;
645}
646
647static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
648 enum g2d_reg_type reg_type,
649 unsigned long size)
650{
651 unsigned int width, height;
652 unsigned long area;
653
654 /*
655 * Check source and destination buffers only;
656 * the others are always considered valid.
657 */
658 if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
659 return true;
660
661 width = buf_desc->right_x - buf_desc->left_x;
662 if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
663 DRM_ERROR("width[%u] is out of range!\n", width);
664 return false;
665 }
666
667 height = buf_desc->bottom_y - buf_desc->top_y;
668 if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
669 DRM_ERROR("height[%u] is out of range!\n", height);
670 return false;
671 }
672
673 area = (unsigned long)width * (unsigned long)height *
674 g2d_get_buf_bpp(buf_desc->format);
675 if (area > size) {
676 DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size);
677 return false;
678 }
679
680 return true;
681}
682
509static int g2d_map_cmdlist_gem(struct g2d_data *g2d, 683static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
510 struct g2d_cmdlist_node *node, 684 struct g2d_cmdlist_node *node,
511 struct drm_device *drm_dev, 685 struct drm_device *drm_dev,
512 struct drm_file *file) 686 struct drm_file *file)
513{ 687{
514 struct g2d_cmdlist *cmdlist = node->cmdlist; 688 struct g2d_cmdlist *cmdlist = node->cmdlist;
689 struct g2d_buf_info *buf_info = &node->buf_info;
515 int offset; 690 int offset;
691 int ret;
516 int i; 692 int i;
517 693
518 for (i = 0; i < node->map_nr; i++) { 694 for (i = 0; i < buf_info->map_nr; i++) {
695 struct g2d_buf_desc *buf_desc;
696 enum g2d_reg_type reg_type;
697 int reg_pos;
519 unsigned long handle; 698 unsigned long handle;
520 dma_addr_t *addr; 699 dma_addr_t *addr;
521 700
522 offset = cmdlist->last - (i * 2 + 1); 701 reg_pos = cmdlist->last - 2 * (i + 1);
523 handle = cmdlist->data[offset]; 702
703 offset = cmdlist->data[reg_pos];
704 handle = cmdlist->data[reg_pos + 1];
705
706 reg_type = g2d_get_reg_type(offset);
707 if (reg_type == REG_TYPE_NONE) {
708 ret = -EFAULT;
709 goto err;
710 }
711
712 buf_desc = &buf_info->descs[reg_type];
713
714 if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
715 unsigned long size;
716
717 size = exynos_drm_gem_get_size(drm_dev, handle, file);
718 if (!size) {
719 ret = -EFAULT;
720 goto err;
721 }
722
723 if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
724 size)) {
725 ret = -EFAULT;
726 goto err;
727 }
524 728
525 if (node->obj_type[i] == BUF_TYPE_GEM) {
526 addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, 729 addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
527 file); 730 file);
528 if (IS_ERR(addr)) { 731 if (IS_ERR(addr)) {
529 node->map_nr = i; 732 ret = -EFAULT;
530 return -EFAULT; 733 goto err;
531 } 734 }
532 } else { 735 } else {
533 struct drm_exynos_g2d_userptr g2d_userptr; 736 struct drm_exynos_g2d_userptr g2d_userptr;
534 737
535 if (copy_from_user(&g2d_userptr, (void __user *)handle, 738 if (copy_from_user(&g2d_userptr, (void __user *)handle,
536 sizeof(struct drm_exynos_g2d_userptr))) { 739 sizeof(struct drm_exynos_g2d_userptr))) {
537 node->map_nr = i; 740 ret = -EFAULT;
538 return -EFAULT; 741 goto err;
742 }
743
744 if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
745 g2d_userptr.size)) {
746 ret = -EFAULT;
747 goto err;
539 } 748 }
540 749
541 addr = g2d_userptr_get_dma_addr(drm_dev, 750 addr = g2d_userptr_get_dma_addr(drm_dev,
@@ -544,16 +753,21 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
544 file, 753 file,
545 &handle); 754 &handle);
546 if (IS_ERR(addr)) { 755 if (IS_ERR(addr)) {
547 node->map_nr = i; 756 ret = -EFAULT;
548 return -EFAULT; 757 goto err;
549 } 758 }
550 } 759 }
551 760
552 cmdlist->data[offset] = *addr; 761 cmdlist->data[reg_pos + 1] = *addr;
553 node->handles[i] = handle; 762 buf_info->reg_types[i] = reg_type;
763 buf_info->handles[reg_type] = handle;
554 } 764 }
555 765
556 return 0; 766 return 0;
767
768err:
769 buf_info->map_nr = i;
770 return ret;
557} 771}
558 772
559static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d, 773static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
@@ -561,22 +775,33 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
561 struct drm_file *filp) 775 struct drm_file *filp)
562{ 776{
563 struct exynos_drm_subdrv *subdrv = &g2d->subdrv; 777 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
778 struct g2d_buf_info *buf_info = &node->buf_info;
564 int i; 779 int i;
565 780
566 for (i = 0; i < node->map_nr; i++) { 781 for (i = 0; i < buf_info->map_nr; i++) {
567 unsigned long handle = node->handles[i]; 782 struct g2d_buf_desc *buf_desc;
783 enum g2d_reg_type reg_type;
784 unsigned long handle;
785
786 reg_type = buf_info->reg_types[i];
787
788 buf_desc = &buf_info->descs[reg_type];
789 handle = buf_info->handles[reg_type];
568 790
569 if (node->obj_type[i] == BUF_TYPE_GEM) 791 if (buf_info->types[reg_type] == BUF_TYPE_GEM)
570 exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle, 792 exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
571 filp); 793 filp);
572 else 794 else
573 g2d_userptr_put_dma_addr(subdrv->drm_dev, handle, 795 g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
574 false); 796 false);
575 797
576 node->handles[i] = 0; 798 buf_info->reg_types[i] = REG_TYPE_NONE;
799 buf_info->handles[reg_type] = 0;
800 buf_info->types[reg_type] = 0;
801 memset(buf_desc, 0x00, sizeof(*buf_desc));
577 } 802 }
578 803
579 node->map_nr = 0; 804 buf_info->map_nr = 0;
580} 805}
581 806
582static void g2d_dma_start(struct g2d_data *g2d, 807static void g2d_dma_start(struct g2d_data *g2d,
@@ -589,10 +814,6 @@ static void g2d_dma_start(struct g2d_data *g2d,
589 pm_runtime_get_sync(g2d->dev); 814 pm_runtime_get_sync(g2d->dev);
590 clk_enable(g2d->gate_clk); 815 clk_enable(g2d->gate_clk);
591 816
592 /* interrupt enable */
593 writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
594 g2d->regs + G2D_INTEN);
595
596 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); 817 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
597 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); 818 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
598} 819}
@@ -643,7 +864,6 @@ static void g2d_runqueue_worker(struct work_struct *work)
643 struct g2d_data *g2d = container_of(work, struct g2d_data, 864 struct g2d_data *g2d = container_of(work, struct g2d_data,
644 runqueue_work); 865 runqueue_work);
645 866
646
647 mutex_lock(&g2d->runqueue_mutex); 867 mutex_lock(&g2d->runqueue_mutex);
648 clk_disable(g2d->gate_clk); 868 clk_disable(g2d->gate_clk);
649 pm_runtime_put_sync(g2d->dev); 869 pm_runtime_put_sync(g2d->dev);
@@ -724,20 +944,14 @@ static int g2d_check_reg_offset(struct device *dev,
724 int i; 944 int i;
725 945
726 for (i = 0; i < nr; i++) { 946 for (i = 0; i < nr; i++) {
727 index = cmdlist->last - 2 * (i + 1); 947 struct g2d_buf_info *buf_info = &node->buf_info;
948 struct g2d_buf_desc *buf_desc;
949 enum g2d_reg_type reg_type;
950 unsigned long value;
728 951
729 if (for_addr) { 952 index = cmdlist->last - 2 * (i + 1);
730 /* check userptr buffer type. */
731 reg_offset = (cmdlist->data[index] &
732 ~0x7fffffff) >> 31;
733 if (reg_offset) {
734 node->obj_type[i] = BUF_TYPE_USERPTR;
735 cmdlist->data[index] &= ~G2D_BUF_USERPTR;
736 }
737 }
738 953
739 reg_offset = cmdlist->data[index] & ~0xfffff000; 954 reg_offset = cmdlist->data[index] & ~0xfffff000;
740
741 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) 955 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
742 goto err; 956 goto err;
743 if (reg_offset % 4) 957 if (reg_offset % 4)
@@ -753,8 +967,60 @@ static int g2d_check_reg_offset(struct device *dev,
753 if (!for_addr) 967 if (!for_addr)
754 goto err; 968 goto err;
755 969
756 if (node->obj_type[i] != BUF_TYPE_USERPTR) 970 reg_type = g2d_get_reg_type(reg_offset);
757 node->obj_type[i] = BUF_TYPE_GEM; 971 if (reg_type == REG_TYPE_NONE)
972 goto err;
973
974 /* check userptr buffer type. */
975 if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
976 buf_info->types[reg_type] = BUF_TYPE_USERPTR;
977 cmdlist->data[index] &= ~G2D_BUF_USERPTR;
978 } else
979 buf_info->types[reg_type] = BUF_TYPE_GEM;
980 break;
981 case G2D_SRC_COLOR_MODE:
982 case G2D_DST_COLOR_MODE:
983 if (for_addr)
984 goto err;
985
986 reg_type = g2d_get_reg_type(reg_offset);
987 if (reg_type == REG_TYPE_NONE)
988 goto err;
989
990 buf_desc = &buf_info->descs[reg_type];
991 value = cmdlist->data[index + 1];
992
993 buf_desc->format = value & 0xf;
994 break;
995 case G2D_SRC_LEFT_TOP:
996 case G2D_DST_LEFT_TOP:
997 if (for_addr)
998 goto err;
999
1000 reg_type = g2d_get_reg_type(reg_offset);
1001 if (reg_type == REG_TYPE_NONE)
1002 goto err;
1003
1004 buf_desc = &buf_info->descs[reg_type];
1005 value = cmdlist->data[index + 1];
1006
1007 buf_desc->left_x = value & 0x1fff;
1008 buf_desc->top_y = (value & 0x1fff0000) >> 16;
1009 break;
1010 case G2D_SRC_RIGHT_BOTTOM:
1011 case G2D_DST_RIGHT_BOTTOM:
1012 if (for_addr)
1013 goto err;
1014
1015 reg_type = g2d_get_reg_type(reg_offset);
1016 if (reg_type == REG_TYPE_NONE)
1017 goto err;
1018
1019 buf_desc = &buf_info->descs[reg_type];
1020 value = cmdlist->data[index + 1];
1021
1022 buf_desc->right_x = value & 0x1fff;
1023 buf_desc->bottom_y = (value & 0x1fff0000) >> 16;
758 break; 1024 break;
759 default: 1025 default:
760 if (for_addr) 1026 if (for_addr)
@@ -860,9 +1126,23 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
860 cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR; 1126 cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
861 cmdlist->data[cmdlist->last++] = 0; 1127 cmdlist->data[cmdlist->last++] = 0;
862 1128
1129 /*
1130 * 'LIST_HOLD' command should be set to the DMA_HOLD_CMD_REG
1131 * and the GCF bit should be set in the INTEN register if the user
1132 * wants a G2D interrupt event once the current command list execution
1133 * is finished.
1134 * Otherwise only the ACF bit should be set in the INTEN register so
1135 * that one interrupt occurs after all command lists
1136 * have been completed.
1137 */
863 if (node->event) { 1138 if (node->event) {
1139 cmdlist->data[cmdlist->last++] = G2D_INTEN;
1140 cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF;
864 cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD; 1141 cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
865 cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD; 1142 cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
1143 } else {
1144 cmdlist->data[cmdlist->last++] = G2D_INTEN;
1145 cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
866 } 1146 }
867 1147
868 /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */ 1148 /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
@@ -887,7 +1167,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
887 if (ret < 0) 1167 if (ret < 0)
888 goto err_free_event; 1168 goto err_free_event;
889 1169
890 node->map_nr = req->cmd_buf_nr; 1170 node->buf_info.map_nr = req->cmd_buf_nr;
891 if (req->cmd_buf_nr) { 1171 if (req->cmd_buf_nr) {
892 struct drm_exynos_g2d_cmd *cmd_buf; 1172 struct drm_exynos_g2d_cmd *cmd_buf;
893 1173
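
The reworked mapping code above depends on the cmdlist layout: commands are (register, value) pairs appended to data[], and the buffer commands are the last map_nr pairs, so pair i lives at reg_pos = last - 2 * (i + 1), with the register offset at reg_pos and the handle at reg_pos + 1. A standalone illustration of that addressing, reusing register offsets from the defines above with invented values:

#include <stdio.h>

int main(void)
{
	unsigned data[8];
	unsigned last = 0;

	/* append three (reg, value) pairs, as the set_cmdlist ioctl does */
	data[last++] = 0x0304; data[last++] = 0x1111; /* SRC_BASE_ADDR */
	data[last++] = 0x0404; data[last++] = 0x2222; /* DST_BASE_ADDR */
	data[last++] = 0x0500; data[last++] = 0x3333; /* PAT_BASE_ADDR */

	/* walk the pairs from the tail, exactly as the driver does */
	for (unsigned i = 0; i < 3; i++) {
		unsigned reg_pos = last - 2 * (i + 1);

		printf("pair %u: reg=0x%04x value=0x%04x\n",
		       i, data[reg_pos], data[reg_pos + 1]);
	}
	return 0;
}
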
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 67e17ce112b6..0e6fe000578c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -164,6 +164,27 @@ out:
164 exynos_gem_obj = NULL; 164 exynos_gem_obj = NULL;
165} 165}
166 166
167unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
168 unsigned int gem_handle,
169 struct drm_file *file_priv)
170{
171 struct exynos_drm_gem_obj *exynos_gem_obj;
172 struct drm_gem_object *obj;
173
174 obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
175 if (!obj) {
176 DRM_ERROR("failed to lookup gem object.\n");
177 return 0;
178 }
179
180 exynos_gem_obj = to_exynos_gem_obj(obj);
181
182 drm_gem_object_unreference_unlocked(obj);
183
184 return exynos_gem_obj->buffer->size;
185}
186
187
167struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, 188struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
168 unsigned long size) 189 unsigned long size)
169{ 190{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 35ebac47dc2b..468766bee450 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -130,6 +130,11 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
130int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 130int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
131 struct drm_file *file_priv); 131 struct drm_file *file_priv);
132 132
133/* get the buffer size for a gem handle. */
134unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
135 unsigned int gem_handle,
136 struct drm_file *file_priv);
137
133/* initialize gem object. */ 138/* initialize gem object. */
134int exynos_drm_gem_init_object(struct drm_gem_object *obj); 139int exynos_drm_gem_init_object(struct drm_gem_object *obj);
135 140
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 13ccbd4bcfaa..9504b0cd825a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -117,13 +117,12 @@ static struct edid *vidi_get_edid(struct device *dev,
117 } 117 }
118 118
119 edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; 119 edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
120 edid = kzalloc(edid_len, GFP_KERNEL); 120 edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
121 if (!edid) { 121 if (!edid) {
122 DRM_DEBUG_KMS("failed to allocate edid\n"); 122 DRM_DEBUG_KMS("failed to allocate edid\n");
123 return ERR_PTR(-ENOMEM); 123 return ERR_PTR(-ENOMEM);
124 } 124 }
125 125
126 memcpy(edid, ctx->raw_edid, edid_len);
127 return edid; 126 return edid;
128} 127}
129 128
@@ -563,12 +562,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
563 return -EINVAL; 562 return -EINVAL;
564 } 563 }
565 edid_len = (1 + raw_edid->extensions) * EDID_LENGTH; 564 edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
566 ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL); 565 ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
567 if (!ctx->raw_edid) { 566 if (!ctx->raw_edid) {
568 DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); 567 DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
569 return -ENOMEM; 568 return -ENOMEM;
570 } 569 }
571 memcpy(ctx->raw_edid, raw_edid, edid_len);
572 } else { 570 } else {
573 /* 571 /*
574 * with connection = 0, free raw_edid 572 * with connection = 0, free raw_edid
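
Both vidi hunks replace a kzalloc()/memcpy() pair with kmemdup(), which allocates and copies in a single call and removes the chance of forgetting the copy. A userspace analogue, with plain malloc() standing in for the GFP-flagged kernel allocator:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical userspace counterpart of kmemdup(). */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char raw_edid[] = "fake-edid-block";
	char *copy = memdup(raw_edid, sizeof(raw_edid));

	if (!copy)
		return 1;
	printf("%s\n", copy);
	free(copy);
	return 0;
}
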
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e919aba29b3d..2f4f72f07047 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -818,7 +818,7 @@ static void mixer_win_disable(void *ctx, int win)
818 mixer_ctx->win_data[win].enabled = false; 818 mixer_ctx->win_data[win].enabled = false;
819} 819}
820 820
821int mixer_check_timing(void *ctx, struct fb_videomode *timing) 821static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
822{ 822{
823 struct mixer_context *mixer_ctx = ctx; 823 struct mixer_context *mixer_ctx = ctx;
824 u32 w, h; 824 u32 w, h;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index aae31489c893..7299ea45dd03 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -103,7 +103,7 @@ static const char *cache_level_str(int type)
103static void 103static void
104describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 104describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
105{ 105{
106 seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", 106 seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
107 &obj->base, 107 &obj->base,
108 get_pin_flag(obj), 108 get_pin_flag(obj),
109 get_tiling_flag(obj), 109 get_tiling_flag(obj),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0a8eceb75902..e9b57893db2b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -125,6 +125,11 @@ MODULE_PARM_DESC(preliminary_hw_support,
125 "Enable Haswell and ValleyView Support. " 125 "Enable Haswell and ValleyView Support. "
126 "(default: false)"); 126 "(default: false)");
127 127
128int i915_disable_power_well __read_mostly = 0;
129module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
130MODULE_PARM_DESC(disable_power_well,
131 "Disable the power well when possible (default: false)");
132
128static struct drm_driver driver; 133static struct drm_driver driver;
129extern int intel_agp_enabled; 134extern int intel_agp_enabled;
130 135
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e95337c97459..01769e2a9953 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1398,6 +1398,7 @@ extern int i915_enable_fbc __read_mostly;
1398extern bool i915_enable_hangcheck __read_mostly; 1398extern bool i915_enable_hangcheck __read_mostly;
1399extern int i915_enable_ppgtt __read_mostly; 1399extern int i915_enable_ppgtt __read_mostly;
1400extern unsigned int i915_preliminary_hw_support __read_mostly; 1400extern unsigned int i915_preliminary_hw_support __read_mostly;
1401extern int i915_disable_power_well __read_mostly;
1401 1402
1402extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1403extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1403extern int i915_resume(struct drm_device *dev); 1404extern int i915_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2f2daebd0eef..3b11ab0fbc96 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -732,6 +732,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
732 int count) 732 int count)
733{ 733{
734 int i; 734 int i;
735 int relocs_total = 0;
736 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
735 737
736 for (i = 0; i < count; i++) { 738 for (i = 0; i < count; i++) {
737 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; 739 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
@@ -740,10 +742,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
740 if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) 742 if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
741 return -EINVAL; 743 return -EINVAL;
742 744
743 /* First check for malicious input causing overflow */ 745 /* First check for malicious input causing overflow in
744 if (exec[i].relocation_count > 746 * the worst case where we need to allocate the entire
745 INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) 747 * relocation tree as a single array.
748 */
749 if (exec[i].relocation_count > relocs_max - relocs_total)
746 return -EINVAL; 750 return -EINVAL;
751 relocs_total += exec[i].relocation_count;
747 752
748 length = exec[i].relocation_count * 753 length = exec[i].relocation_count *
749 sizeof(struct drm_i915_gem_relocation_entry); 754 sizeof(struct drm_i915_gem_relocation_entry);
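
The execbuffer change above turns a per-entry bound check into an overflow-proof running total: testing count > max - total before adding guarantees the sum itself never wraps, since total never exceeds max and max - total therefore stays in range. The same pattern in isolation; the divisor 32 is a stand-in for sizeof(struct drm_i915_gem_relocation_entry):

#include <limits.h>
#include <stdio.h>

/* Accumulate count into *total, refusing anything that would exceed max. */
static int add_checked(int *total, int count, int max)
{
	if (count < 0 || count > max - *total)
		return -1;        /* would exceed max (or wrap) */
	*total += count;
	return 0;
}

int main(void)
{
	int total = 0;
	int max = INT_MAX / 32;

	printf("first:  %d\n", add_checked(&total, max - 10, max)); /* ok */
	printf("second: %d\n", add_checked(&total, 100, max));      /* -1 */
	return 0;
}
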
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 287b42c9d1a8..b20d50192fcc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5771,6 +5771,11 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5771 num_connectors++; 5771 num_connectors++;
5772 } 5772 }
5773 5773
5774 if (is_cpu_edp)
5775 intel_crtc->cpu_transcoder = TRANSCODER_EDP;
5776 else
5777 intel_crtc->cpu_transcoder = pipe;
5778
5774 /* We are not sure yet this won't happen. */ 5779 /* We are not sure yet this won't happen. */
5775 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", 5780 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
5776 INTEL_PCH_TYPE(dev)); 5781 INTEL_PCH_TYPE(dev));
@@ -5837,11 +5842,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5837 int pipe = intel_crtc->pipe; 5842 int pipe = intel_crtc->pipe;
5838 int ret; 5843 int ret;
5839 5844
5840 if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
5841 intel_crtc->cpu_transcoder = TRANSCODER_EDP;
5842 else
5843 intel_crtc->cpu_transcoder = pipe;
5844
5845 drm_vblank_pre_modeset(dev, pipe); 5845 drm_vblank_pre_modeset(dev, pipe);
5846 5846
5847 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, 5847 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6f728e5ee793..d7d4afe01341 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -820,6 +820,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
820 struct intel_link_m_n m_n; 820 struct intel_link_m_n m_n;
821 int pipe = intel_crtc->pipe; 821 int pipe = intel_crtc->pipe;
822 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 822 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
823 int target_clock;
823 824
824 /* 825 /*
825 * Find the lane count in the intel_encoder private 826 * Find the lane count in the intel_encoder private
@@ -835,13 +836,22 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
835 } 836 }
836 } 837 }
837 838
839 target_clock = mode->clock;
840 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
841 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
842 target_clock = intel_edp_target_clock(intel_encoder,
843 mode);
844 break;
845 }
846 }
847
838 /* 848 /*
839 * Compute the GMCH and Link ratios. The '3' here is 849 * Compute the GMCH and Link ratios. The '3' here is
840 * the number of bytes_per_pixel post-LUT, which we always 850 * the number of bytes_per_pixel post-LUT, which we always
841 * set up for 8-bits of R/G/B, or 3 bytes total. 851 * set up for 8-bits of R/G/B, or 3 bytes total.
842 */ 852 */
843 intel_link_compute_m_n(intel_crtc->bpp, lane_count, 853 intel_link_compute_m_n(intel_crtc->bpp, lane_count,
844 mode->clock, adjusted_mode->clock, &m_n); 854 target_clock, adjusted_mode->clock, &m_n);
845 855
846 if (IS_HASWELL(dev)) { 856 if (IS_HASWELL(dev)) {
847 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), 857 I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
@@ -1930,7 +1940,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1930 for (i = 0; i < intel_dp->lane_count; i++) 1940 for (i = 0; i < intel_dp->lane_count; i++)
1931 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1941 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1932 break; 1942 break;
1933 if (i == intel_dp->lane_count && voltage_tries == 5) { 1943 if (i == intel_dp->lane_count) {
1934 ++loop_tries; 1944 ++loop_tries;
1935 if (loop_tries == 5) { 1945 if (loop_tries == 5) {
1936 DRM_DEBUG_KMS("too many full retries, give up\n"); 1946 DRM_DEBUG_KMS("too many full retries, give up\n");
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index acf8aec9ada7..ef4744e1bf0b 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -203,7 +203,13 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
203 algo->data = bus; 203 algo->data = bus;
204} 204}
205 205
206#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4) 206/*
207 * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
208 * mode. This results in spurious interrupt warnings if the legacy irq no. is
209 * shared with another device. The kernel then disables that interrupt source
210 * and so prevents the other device from working properly.
211 */
212#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
207static int 213static int
208gmbus_wait_hw_status(struct drm_i915_private *dev_priv, 214gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
209 u32 gmbus2_status, 215 u32 gmbus2_status,
@@ -214,6 +220,9 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
214 u32 gmbus2 = 0; 220 u32 gmbus2 = 0;
215 DEFINE_WAIT(wait); 221 DEFINE_WAIT(wait);
216 222
223 if (!HAS_GMBUS_IRQ(dev_priv->dev))
224 gmbus4_irq_en = 0;
225
217 /* Important: The hw handles only the first bit, so set only one! Since 226 /* Important: The hw handles only the first bit, so set only one! Since
218 * we also need to check for NAKs besides the hw ready/idle signal, we 227 * we also need to check for NAKs besides the hw ready/idle signal, we
219 * need to wake up periodically and check that ourselves. */ 228 * need to wake up periodically and check that ourselves. */
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a3730e0289e5..bee8cb6108a7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -321,9 +321,6 @@ void intel_panel_enable_backlight(struct drm_device *dev,
321 if (dev_priv->backlight_level == 0) 321 if (dev_priv->backlight_level == 0)
322 dev_priv->backlight_level = intel_panel_get_max_backlight(dev); 322 dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
323 323
324 dev_priv->backlight_enabled = true;
325 intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
326
327 if (INTEL_INFO(dev)->gen >= 4) { 324 if (INTEL_INFO(dev)->gen >= 4) {
328 uint32_t reg, tmp; 325 uint32_t reg, tmp;
329 326
@@ -359,12 +356,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
359 } 356 }
360 357
361set_level: 358set_level:
362 /* Check the current backlight level and try to set again if it's zero. 359 /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
363 * On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically 360 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
364 * when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written. 361 * registers are set.
365 */ 362 */
366 if (!intel_panel_get_backlight(dev)) 363 dev_priv->backlight_enabled = true;
367 intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); 364 intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
368} 365}
369 366
370static void intel_panel_init_backlight(struct drm_device *dev) 367static void intel_panel_init_backlight(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a1794c6df1bf..adca00783e61 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4079,6 +4079,9 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
4079 if (!IS_HASWELL(dev)) 4079 if (!IS_HASWELL(dev))
4080 return; 4080 return;
4081 4081
4082 if (!i915_disable_power_well && !enable)
4083 return;
4084
4082 tmp = I915_READ(HSW_PWR_WELL_DRIVER); 4085 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
4083 is_enabled = tmp & HSW_PWR_WELL_STATE; 4086 is_enabled = tmp & HSW_PWR_WELL_STATE;
4084 enable_requested = tmp & HSW_PWR_WELL_ENABLE; 4087 enable_requested = tmp & HSW_PWR_WELL_ENABLE;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index a274b9906ef8..fe22bb780e1d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -382,19 +382,19 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
382 m = n = p = 0; 382 m = n = p = 0;
383 vcomax = 800000; 383 vcomax = 800000;
384 vcomin = 400000; 384 vcomin = 400000;
385 pllreffreq = 3333; 385 pllreffreq = 33333;
386 386
387 delta = 0xffffffff; 387 delta = 0xffffffff;
388 permitteddelta = clock * 5 / 1000; 388 permitteddelta = clock * 5 / 1000;
389 389
390 for (testp = 16; testp > 0; testp--) { 390 for (testp = 16; testp > 0; testp >>= 1) {
391 if (clock * testp > vcomax) 391 if (clock * testp > vcomax)
392 continue; 392 continue;
393 if (clock * testp < vcomin) 393 if (clock * testp < vcomin)
394 continue; 394 continue;
395 395
396 for (testm = 1; testm < 33; testm++) { 396 for (testm = 1; testm < 33; testm++) {
397 for (testn = 1; testn < 257; testn++) { 397 for (testn = 17; testn < 257; testn++) {
398 computed = (pllreffreq * testn) / 398 computed = (pllreffreq * testn) /
399 (testm * testp); 399 (testm * testp);
400 if (computed > clock) 400 if (computed > clock)
@@ -404,11 +404,11 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
404 if (tmpdelta < delta) { 404 if (tmpdelta < delta) {
405 delta = tmpdelta; 405 delta = tmpdelta;
406 n = testn - 1; 406 n = testn - 1;
407 m = (testm - 1) | ((n >> 1) & 0x80); 407 m = (testm - 1);
408 p = testp - 1; 408 p = testp - 1;
409 } 409 }
410 if ((clock * testp) >= 600000) 410 if ((clock * testp) >= 600000)
411 p |= 80; 411 p |= 0x80;
412 } 412 }
413 } 413 }
414 } 414 }
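
One of the mgag200 fixes above is a classic constant bug: p |= 80 ORs in decimal 80, i.e. 0x50 (bits 6 and 4), where bit 7 (0x80) was intended. Shown directly:

#include <stdio.h>

int main(void)
{
	unsigned p = 0;

	printf("p |= 80   -> 0x%02x\n", p | 80);    /* 0x50: wrong bits */
	printf("p |= 0x80 -> 0x%02x\n", p | 0x80);  /* 0x80: bit 7 set  */
	return 0;
}
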
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
index 0daab62ea14c..3b2e7b6304d3 100644
--- a/drivers/gpu/drm/nouveau/core/core/object.c
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -278,7 +278,6 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
278 struct nouveau_object *parent = NULL; 278 struct nouveau_object *parent = NULL;
279 struct nouveau_object *namedb = NULL; 279 struct nouveau_object *namedb = NULL;
280 struct nouveau_handle *handle = NULL; 280 struct nouveau_handle *handle = NULL;
281 int ret = -EINVAL;
282 281
283 parent = nouveau_handle_ref(client, _parent); 282 parent = nouveau_handle_ref(client, _parent);
284 if (!parent) 283 if (!parent)
@@ -295,7 +294,7 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
295 } 294 }
296 295
297 nouveau_object_ref(NULL, &parent); 296 nouveau_object_ref(NULL, &parent);
298 return ret; 297 return handle ? 0 : -EINVAL;
299} 298}
300 299
301int 300int
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index 6b17b614629f..0b20fc0d19c1 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -4,7 +4,7 @@
4#include <core/device.h> 4#include <core/device.h>
5#include <core/subdev.h> 5#include <core/subdev.h>
6 6
7enum nouveau_therm_mode { 7enum nouveau_therm_fan_mode {
8 NOUVEAU_THERM_CTRL_NONE = 0, 8 NOUVEAU_THERM_CTRL_NONE = 0,
9 NOUVEAU_THERM_CTRL_MANUAL = 1, 9 NOUVEAU_THERM_CTRL_MANUAL = 1,
10 NOUVEAU_THERM_CTRL_AUTO = 2, 10 NOUVEAU_THERM_CTRL_AUTO = 2,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index f794dc89a3b2..a00a5a76e2d6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -134,7 +134,7 @@ nouveau_therm_alarm(struct nouveau_alarm *alarm)
134} 134}
135 135
136int 136int
137nouveau_therm_mode(struct nouveau_therm *therm, int mode) 137nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode)
138{ 138{
139 struct nouveau_therm_priv *priv = (void *)therm; 139 struct nouveau_therm_priv *priv = (void *)therm;
140 struct nouveau_device *device = nv_device(therm); 140 struct nouveau_device *device = nv_device(therm);
@@ -149,10 +149,15 @@ nouveau_therm_mode(struct nouveau_therm *therm, int mode)
149 (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0)) 149 (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0))
150 return -EINVAL; 150 return -EINVAL;
151 151
152 /* do not allow automatic fan management if the thermal sensor is
153 * not available */
154 if (priv->mode == 2 && therm->temp_get(therm) < 0)
155 return -EINVAL;
156
152 if (priv->mode == mode) 157 if (priv->mode == mode)
153 return 0; 158 return 0;
154 159
155 nv_info(therm, "Thermal management: %s\n", name[mode]); 160 nv_info(therm, "fan management: %s\n", name[mode]);
156 nouveau_therm_update(therm, mode); 161 nouveau_therm_update(therm, mode);
157 return 0; 162 return 0;
158} 163}
@@ -213,7 +218,7 @@ nouveau_therm_attr_set(struct nouveau_therm *therm,
213 priv->fan->bios.max_duty = value; 218 priv->fan->bios.max_duty = value;
214 return 0; 219 return 0;
215 case NOUVEAU_THERM_ATTR_FAN_MODE: 220 case NOUVEAU_THERM_ATTR_FAN_MODE:
216 return nouveau_therm_mode(therm, value); 221 return nouveau_therm_fan_mode(therm, value);
217 case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST: 222 case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
218 priv->bios_sensor.thrs_fan_boost.temp = value; 223 priv->bios_sensor.thrs_fan_boost.temp = value;
219 priv->sensor.program_alarms(therm); 224 priv->sensor.program_alarms(therm);
@@ -263,7 +268,7 @@ _nouveau_therm_init(struct nouveau_object *object)
263 return ret; 268 return ret;
264 269
265 if (priv->suspend >= 0) 270 if (priv->suspend >= 0)
266 nouveau_therm_mode(therm, priv->mode); 271 nouveau_therm_fan_mode(therm, priv->mode);
267 priv->sensor.program_alarms(therm); 272 priv->sensor.program_alarms(therm);
268 return 0; 273 return 0;
269} 274}
@@ -313,11 +318,12 @@ nouveau_therm_create_(struct nouveau_object *parent,
313int 318int
314nouveau_therm_preinit(struct nouveau_therm *therm) 319nouveau_therm_preinit(struct nouveau_therm *therm)
315{ 320{
316 nouveau_therm_ic_ctor(therm);
317 nouveau_therm_sensor_ctor(therm); 321 nouveau_therm_sensor_ctor(therm);
322 nouveau_therm_ic_ctor(therm);
318 nouveau_therm_fan_ctor(therm); 323 nouveau_therm_fan_ctor(therm);
319 324
320 nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_NONE); 325 nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE);
326 nouveau_therm_sensor_preinit(therm);
321 return 0; 327 return 0;
322} 328}
323 329
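
The therm/base.c hunks above add a guard so a fan-mode change is refused while the thermal sensor cannot produce a reading, and reorder preinit so the sensor is constructed before the I2C monitoring device and a sensor preinit pass runs. Below is a minimal userspace C sketch of the guard only; the mode constants, the temp_get callbacks, and the check on the requested mode (the kernel version checks its stored mode) are illustrative assumptions, not the kernel code:

    #include <errno.h>
    #include <stdio.h>

    enum { CTRL_NONE, CTRL_MANUAL, CTRL_AUTO };

    static int temp_get_ok(void)  { return 53; }      /* working sensor */
    static int temp_get_bad(void) { return -ENODEV; } /* no usable sensor */

    static int set_fan_mode(int mode, int (*temp_get)(void))
    {
        /* do not allow automatic fan management if the thermal sensor
         * is not available (sketch: checks the requested mode) */
        if (mode == CTRL_AUTO && temp_get() < 0)
            return -EINVAL;

        printf("fan management: mode %d\n", mode);
        return 0;
    }

    int main(void)
    {
        set_fan_mode(CTRL_AUTO, temp_get_ok);           /* accepted */
        if (set_fan_mode(CTRL_AUTO, temp_get_bad) < 0)  /* refused */
            puts("auto mode refused: no usable sensor");
        return 0;
    }

The convention the series relies on is simply that a negative temp_get() return means "no sensor", so automatic control is rejected up front.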
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index e24090bac195..8b3adec5fbb1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -32,6 +32,7 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
32 struct i2c_board_info *info) 32 struct i2c_board_info *info)
33{ 33{
34 struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); 34 struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c);
35 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
35 struct i2c_client *client; 36 struct i2c_client *client;
36 37
37 request_module("%s%s", I2C_MODULE_PREFIX, info->type); 38 request_module("%s%s", I2C_MODULE_PREFIX, info->type);
@@ -46,8 +47,9 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
46 } 47 }
47 48
48 nv_info(priv, 49 nv_info(priv,
49 "Found an %s at address 0x%x (controlled by lm_sensors)\n", 50 "Found an %s at address 0x%x (controlled by lm_sensors, "
50 info->type, info->addr); 51 "temp offset %+i C)\n",
52 info->type, info->addr, sensor->offset_constant);
51 priv->ic = client; 53 priv->ic = client;
52 54
53 return true; 55 return true;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
index 0f5363edb964..a70d1b7e397b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
@@ -29,54 +29,83 @@ struct nv40_therm_priv {
29 struct nouveau_therm_priv base; 29 struct nouveau_therm_priv base;
30}; 30};
31 31
32enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 };
33
34static enum nv40_sensor_style
35nv40_sensor_style(struct nouveau_therm *therm)
36{
37 struct nouveau_device *device = nv_device(therm);
38
39 switch (device->chipset) {
40 case 0x43:
41 case 0x44:
42 case 0x4a:
43 case 0x47:
44 return OLD_STYLE;
45
46 case 0x46:
47 case 0x49:
48 case 0x4b:
49 case 0x4e:
50 case 0x4c:
51 case 0x67:
52 case 0x68:
53 case 0x63:
54 return NEW_STYLE;
55 default:
56 return INVALID_STYLE;
57 }
58}
59
32static int 60static int
33nv40_sensor_setup(struct nouveau_therm *therm) 61nv40_sensor_setup(struct nouveau_therm *therm)
34{ 62{
35 struct nouveau_device *device = nv_device(therm); 63 enum nv40_sensor_style style = nv40_sensor_style(therm);
36 64
37 /* enable ADC readout and disable the ALARM threshold */ 65 /* enable ADC readout and disable the ALARM threshold */
38 if (device->chipset >= 0x46) { 66 if (style == NEW_STYLE) {
39 nv_mask(therm, 0x15b8, 0x80000000, 0); 67 nv_mask(therm, 0x15b8, 0x80000000, 0);
40 nv_wr32(therm, 0x15b0, 0x80003fff); 68 nv_wr32(therm, 0x15b0, 0x80003fff);
41 mdelay(10); /* wait for the temperature to stabilize */ 69 mdelay(20); /* wait for the temperature to stabilize */
42 return nv_rd32(therm, 0x15b4) & 0x3fff; 70 return nv_rd32(therm, 0x15b4) & 0x3fff;
43 } else { 71 } else if (style == OLD_STYLE) {
44 nv_wr32(therm, 0x15b0, 0xff); 72 nv_wr32(therm, 0x15b0, 0xff);
73 mdelay(20); /* wait for the temperature to stabilize */
45 return nv_rd32(therm, 0x15b4) & 0xff; 74 return nv_rd32(therm, 0x15b4) & 0xff;
46 } 75 } else
76 return -ENODEV;
47} 77}
48 78
49static int 79static int
50nv40_temp_get(struct nouveau_therm *therm) 80nv40_temp_get(struct nouveau_therm *therm)
51{ 81{
52 struct nouveau_therm_priv *priv = (void *)therm; 82 struct nouveau_therm_priv *priv = (void *)therm;
53 struct nouveau_device *device = nv_device(therm);
54 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 83 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
84 enum nv40_sensor_style style = nv40_sensor_style(therm);
55 int core_temp; 85 int core_temp;
56 86
57 if (device->chipset >= 0x46) { 87 if (style == NEW_STYLE) {
58 nv_wr32(therm, 0x15b0, 0x80003fff); 88 nv_wr32(therm, 0x15b0, 0x80003fff);
59 core_temp = nv_rd32(therm, 0x15b4) & 0x3fff; 89 core_temp = nv_rd32(therm, 0x15b4) & 0x3fff;
60 } else { 90 } else if (style == OLD_STYLE) {
61 nv_wr32(therm, 0x15b0, 0xff); 91 nv_wr32(therm, 0x15b0, 0xff);
62 core_temp = nv_rd32(therm, 0x15b4) & 0xff; 92 core_temp = nv_rd32(therm, 0x15b4) & 0xff;
63 } 93 } else
64 94 return -ENODEV;
65 /* Setup the sensor if the temperature is 0 */
66 if (core_temp == 0)
67 core_temp = nv40_sensor_setup(therm);
68 95
 69 if (sensor->slope_div == 0) 96 /* if the slope or the offset is unset, do not use the sensor */
70 sensor->slope_div = 1; 97 if (!sensor->slope_div || !sensor->slope_mult ||
71 if (sensor->offset_den == 0) 98 !sensor->offset_num || !sensor->offset_den)
72 sensor->offset_den = 1; 99 return -ENODEV;
73 if (sensor->slope_mult < 1)
74 sensor->slope_mult = 1;
75 100
76 core_temp = core_temp * sensor->slope_mult / sensor->slope_div; 101 core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
77 core_temp = core_temp + sensor->offset_num / sensor->offset_den; 102 core_temp = core_temp + sensor->offset_num / sensor->offset_den;
78 core_temp = core_temp + sensor->offset_constant - 8; 103 core_temp = core_temp + sensor->offset_constant - 8;
79 104
105 /* reserve negative temperatures for errors */
106 if (core_temp < 0)
107 core_temp = 0;
108
80 return core_temp; 109 return core_temp;
81} 110}
82 111
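
The nv40.c changes above replace silent calibration fallbacks with a hard failure: if the BIOS never supplied slope/offset data, temp_get() now returns -ENODEV, and computed values are clamped so negative numbers are reserved for errors. A self-contained sketch of that convention, with a made-up sensor struct and raw reading (everything here is illustrative, not the driver's types):

    #include <errno.h>
    #include <stdio.h>

    struct fake_sensor {
        int slope_mult, slope_div;
        int offset_num, offset_den;
        int offset_constant;
        int raw;                    /* raw ADC reading */
    };

    static int fake_temp_get(const struct fake_sensor *s)
    {
        int temp;

        /* if the slope or the offset is unset, do not use the sensor */
        if (!s->slope_div || !s->slope_mult ||
            !s->offset_num || !s->offset_den)
            return -ENODEV;

        temp  = s->raw * s->slope_mult / s->slope_div;
        temp += s->offset_num / s->offset_den;
        temp += s->offset_constant - 8;

        /* reserve negative temperatures for errors */
        return temp < 0 ? 0 : temp;
    }

    int main(void)
    {
        struct fake_sensor ok  = { 1, 1, 1, 1, 0, 60 };
        struct fake_sensor bad = { 0 };             /* uncalibrated */

        printf("ok:  %d\n", fake_temp_get(&ok));    /* 53 */
        printf("bad: %d\n", fake_temp_get(&bad));   /* -ENODEV */
        return 0;
    }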
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 06b98706b3fc..438d9824b774 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -102,7 +102,7 @@ struct nouveau_therm_priv {
102 struct i2c_client *ic; 102 struct i2c_client *ic;
103}; 103};
104 104
105int nouveau_therm_mode(struct nouveau_therm *therm, int mode); 105int nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode);
106int nouveau_therm_attr_get(struct nouveau_therm *therm, 106int nouveau_therm_attr_get(struct nouveau_therm *therm,
107 enum nouveau_therm_attr_type type); 107 enum nouveau_therm_attr_type type);
108int nouveau_therm_attr_set(struct nouveau_therm *therm, 108int nouveau_therm_attr_set(struct nouveau_therm *therm,
@@ -122,6 +122,7 @@ int nouveau_therm_fan_sense(struct nouveau_therm *therm);
122 122
123int nouveau_therm_preinit(struct nouveau_therm *); 123int nouveau_therm_preinit(struct nouveau_therm *);
124 124
125void nouveau_therm_sensor_preinit(struct nouveau_therm *);
125void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm, 126void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
126 enum nouveau_therm_thrs thrs, 127 enum nouveau_therm_thrs thrs,
127 enum nouveau_therm_thrs_state st); 128 enum nouveau_therm_thrs_state st);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index b37624af8297..470f6a47b656 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -34,10 +34,6 @@ nouveau_therm_temp_set_defaults(struct nouveau_therm *therm)
34{ 34{
35 struct nouveau_therm_priv *priv = (void *)therm; 35 struct nouveau_therm_priv *priv = (void *)therm;
36 36
37 priv->bios_sensor.slope_mult = 1;
38 priv->bios_sensor.slope_div = 1;
39 priv->bios_sensor.offset_num = 0;
40 priv->bios_sensor.offset_den = 1;
41 priv->bios_sensor.offset_constant = 0; 37 priv->bios_sensor.offset_constant = 0;
42 38
43 priv->bios_sensor.thrs_fan_boost.temp = 90; 39 priv->bios_sensor.thrs_fan_boost.temp = 90;
@@ -60,11 +56,6 @@ nouveau_therm_temp_safety_checks(struct nouveau_therm *therm)
60 struct nouveau_therm_priv *priv = (void *)therm; 56 struct nouveau_therm_priv *priv = (void *)therm;
61 struct nvbios_therm_sensor *s = &priv->bios_sensor; 57 struct nvbios_therm_sensor *s = &priv->bios_sensor;
62 58
63 if (!priv->bios_sensor.slope_div)
64 priv->bios_sensor.slope_div = 1;
65 if (!priv->bios_sensor.offset_den)
66 priv->bios_sensor.offset_den = 1;
67
68 /* enforce a minimum hysteresis on thresholds */ 59 /* enforce a minimum hysteresis on thresholds */
69 s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2); 60 s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
70 s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2); 61 s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2);
@@ -106,16 +97,16 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
106 const char *thresolds[] = { 97 const char *thresolds[] = {
107 "fanboost", "downclock", "critical", "shutdown" 98 "fanboost", "downclock", "critical", "shutdown"
108 }; 99 };
109 uint8_t temperature = therm->temp_get(therm); 100 int temperature = therm->temp_get(therm);
110 101
111 if (thrs < 0 || thrs > 3) 102 if (thrs < 0 || thrs > 3)
112 return; 103 return;
113 104
114 if (dir == NOUVEAU_THERM_THRS_FALLING) 105 if (dir == NOUVEAU_THERM_THRS_FALLING)
115 nv_info(therm, "temperature (%u C) went below the '%s' threshold\n", 106 nv_info(therm, "temperature (%i C) went below the '%s' threshold\n",
116 temperature, thresolds[thrs]); 107 temperature, thresolds[thrs]);
117 else 108 else
118 nv_info(therm, "temperature (%u C) hit the '%s' threshold\n", 109 nv_info(therm, "temperature (%i C) hit the '%s' threshold\n",
119 temperature, thresolds[thrs]); 110 temperature, thresolds[thrs]);
120 111
121 active = (dir == NOUVEAU_THERM_THRS_RISING); 112 active = (dir == NOUVEAU_THERM_THRS_RISING);
@@ -123,7 +114,7 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
123 case NOUVEAU_THERM_THRS_FANBOOST: 114 case NOUVEAU_THERM_THRS_FANBOOST:
124 if (active) { 115 if (active) {
125 nouveau_therm_fan_set(therm, true, 100); 116 nouveau_therm_fan_set(therm, true, 100);
126 nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_AUTO); 117 nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
127 } 118 }
128 break; 119 break;
129 case NOUVEAU_THERM_THRS_DOWNCLOCK: 120 case NOUVEAU_THERM_THRS_DOWNCLOCK:
@@ -202,7 +193,7 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
202 NOUVEAU_THERM_THRS_SHUTDOWN); 193 NOUVEAU_THERM_THRS_SHUTDOWN);
203 194
204 /* schedule the next poll in one second */ 195 /* schedule the next poll in one second */
205 if (list_empty(&alarm->head)) 196 if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
206 ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); 197 ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
207 198
208 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); 199 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
@@ -225,6 +216,17 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
225 alarm_timer_callback(&priv->sensor.therm_poll_alarm); 216 alarm_timer_callback(&priv->sensor.therm_poll_alarm);
226} 217}
227 218
219void
220nouveau_therm_sensor_preinit(struct nouveau_therm *therm)
221{
222 const char *sensor_avail = "yes";
223
224 if (therm->temp_get(therm) < 0)
225 sensor_avail = "no";
226
227 nv_info(therm, "internal sensor: %s\n", sensor_avail);
228}
229
228int 230int
229nouveau_therm_sensor_ctor(struct nouveau_therm *therm) 231nouveau_therm_sensor_ctor(struct nouveau_therm *therm)
230{ 232{
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index bb54098c6d97..936b442a6ab7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -402,8 +402,12 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
402 struct drm_device *dev = dev_get_drvdata(d); 402 struct drm_device *dev = dev_get_drvdata(d);
403 struct nouveau_drm *drm = nouveau_drm(dev); 403 struct nouveau_drm *drm = nouveau_drm(dev);
404 struct nouveau_therm *therm = nouveau_therm(drm->device); 404 struct nouveau_therm *therm = nouveau_therm(drm->device);
405 int temp = therm->temp_get(therm);
405 406
406 return snprintf(buf, PAGE_SIZE, "%d\n", therm->temp_get(therm) * 1000); 407 if (temp < 0)
408 return temp;
409
410 return snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000);
407} 411}
408static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, 412static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
409 NULL, 0); 413 NULL, 0);
@@ -871,7 +875,12 @@ static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
871 nouveau_hwmon_get_pwm1_max, 875 nouveau_hwmon_get_pwm1_max,
872 nouveau_hwmon_set_pwm1_max, 0); 876 nouveau_hwmon_set_pwm1_max, 0);
873 877
874static struct attribute *hwmon_attributes[] = { 878static struct attribute *hwmon_default_attributes[] = {
879 &sensor_dev_attr_name.dev_attr.attr,
880 &sensor_dev_attr_update_rate.dev_attr.attr,
881 NULL
882};
883static struct attribute *hwmon_temp_attributes[] = {
875 &sensor_dev_attr_temp1_input.dev_attr.attr, 884 &sensor_dev_attr_temp1_input.dev_attr.attr,
876 &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr, 885 &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
877 &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr, 886 &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
@@ -882,8 +891,6 @@ static struct attribute *hwmon_attributes[] = {
882 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 891 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
883 &sensor_dev_attr_temp1_emergency.dev_attr.attr, 892 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
884 &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr, 893 &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
885 &sensor_dev_attr_name.dev_attr.attr,
886 &sensor_dev_attr_update_rate.dev_attr.attr,
887 NULL 894 NULL
888}; 895};
889static struct attribute *hwmon_fan_rpm_attributes[] = { 896static struct attribute *hwmon_fan_rpm_attributes[] = {
@@ -898,8 +905,11 @@ static struct attribute *hwmon_pwm_fan_attributes[] = {
898 NULL 905 NULL
899}; 906};
900 907
901static const struct attribute_group hwmon_attrgroup = { 908static const struct attribute_group hwmon_default_attrgroup = {
902 .attrs = hwmon_attributes, 909 .attrs = hwmon_default_attributes,
910};
911static const struct attribute_group hwmon_temp_attrgroup = {
912 .attrs = hwmon_temp_attributes,
903}; 913};
904static const struct attribute_group hwmon_fan_rpm_attrgroup = { 914static const struct attribute_group hwmon_fan_rpm_attrgroup = {
905 .attrs = hwmon_fan_rpm_attributes, 915 .attrs = hwmon_fan_rpm_attributes,
@@ -931,13 +941,22 @@ nouveau_hwmon_init(struct drm_device *dev)
931 } 941 }
932 dev_set_drvdata(hwmon_dev, dev); 942 dev_set_drvdata(hwmon_dev, dev);
933 943
934 /* default sysfs entries */ 944 /* set the default attributes */
935 ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup); 945 ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup);
936 if (ret) { 946 if (ret) {
937 if (ret) 947 if (ret)
938 goto error; 948 goto error;
939 } 949 }
940 950
951 /* if the card has a working thermal sensor */
952 if (therm->temp_get(therm) >= 0) {
953 ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
954 if (ret) {
955 if (ret)
956 goto error;
957 }
958 }
959
941 /* if the card has a pwm fan */ 960 /* if the card has a pwm fan */
942 /*XXX: incorrect, need better detection for this, some boards have 961 /*XXX: incorrect, need better detection for this, some boards have
943 * the gpio entries for pwm fan control even when there's no 962 * the gpio entries for pwm fan control even when there's no
@@ -979,11 +998,10 @@ nouveau_hwmon_fini(struct drm_device *dev)
979 struct nouveau_pm *pm = nouveau_pm(dev); 998 struct nouveau_pm *pm = nouveau_pm(dev);
980 999
981 if (pm->hwmon) { 1000 if (pm->hwmon) {
982 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); 1001 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup);
983 sysfs_remove_group(&pm->hwmon->kobj, 1002 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup);
984 &hwmon_pwm_fan_attrgroup); 1003 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
985 sysfs_remove_group(&pm->hwmon->kobj, 1004 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
986 &hwmon_fan_rpm_attrgroup);
987 1005
988 hwmon_device_unregister(pm->hwmon); 1006 hwmon_device_unregister(pm->hwmon);
989 } 1007 }
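
The nouveau_pm.c changes split the flat hwmon attribute array into a default group and a temperature group, registering the latter only when temp_get() reports a working sensor, and nouveau_hwmon_show_temp now propagates a negative reading as an error instead of printing it scaled. A rough userspace model of the conditional registration follows; register_group() and the group names merely stand in for sysfs_create_group() and the real attribute arrays:

    #include <stdio.h>

    struct attr_group { const char *name; };

    static const struct attr_group default_group = { "name/update_rate" };
    static const struct attr_group temp_group    = { "temp1_*" };

    static int register_group(const struct attr_group *g)
    {
        printf("registered %s\n", g->name);
        return 0;
    }

    static int hwmon_init(int temp_get_ret)
    {
        int ret = register_group(&default_group);
        if (ret)
            return ret;

        /* if the card has a working thermal sensor */
        if (temp_get_ret >= 0)
            ret = register_group(&temp_group);

        return ret;
    }

    int main(void)
    {
        hwmon_init(53);    /* sensor OK: both groups appear */
        hwmon_init(-19);   /* sensor broken: temp group is skipped */
        return 0;
    }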
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2db57990f65c..7f0e6c3f37d1 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -524,6 +524,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
524 swap_interval <<= 4; 524 swap_interval <<= 4;
525 if (swap_interval == 0) 525 if (swap_interval == 0)
526 swap_interval |= 0x100; 526 swap_interval |= 0x100;
527 if (chan == NULL)
528 evo_sync(crtc->dev);
527 529
528 push = evo_wait(sync, 128); 530 push = evo_wait(sync, 128);
529 if (unlikely(push == NULL)) 531 if (unlikely(push == NULL))
@@ -586,8 +588,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
586 sync->addr ^= 0x10; 588 sync->addr ^= 0x10;
587 sync->data++; 589 sync->data++;
588 FIRE_RING (chan); 590 FIRE_RING (chan);
589 } else {
590 evo_sync(crtc->dev);
591 } 591 }
592 592
593 /* queue the flip */ 593 /* queue the flip */
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index d4c633e12863..27769e724b6d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -468,13 +468,19 @@ static void cayman_gpu_init(struct radeon_device *rdev)
468 (rdev->pdev->device == 0x9907) || 468 (rdev->pdev->device == 0x9907) ||
469 (rdev->pdev->device == 0x9908) || 469 (rdev->pdev->device == 0x9908) ||
470 (rdev->pdev->device == 0x9909) || 470 (rdev->pdev->device == 0x9909) ||
471 (rdev->pdev->device == 0x990B) ||
472 (rdev->pdev->device == 0x990C) ||
473 (rdev->pdev->device == 0x990F) ||
471 (rdev->pdev->device == 0x9910) || 474 (rdev->pdev->device == 0x9910) ||
472 (rdev->pdev->device == 0x9917)) { 475 (rdev->pdev->device == 0x9917) ||
476 (rdev->pdev->device == 0x9999)) {
473 rdev->config.cayman.max_simds_per_se = 6; 477 rdev->config.cayman.max_simds_per_se = 6;
474 rdev->config.cayman.max_backends_per_se = 2; 478 rdev->config.cayman.max_backends_per_se = 2;
475 } else if ((rdev->pdev->device == 0x9903) || 479 } else if ((rdev->pdev->device == 0x9903) ||
476 (rdev->pdev->device == 0x9904) || 480 (rdev->pdev->device == 0x9904) ||
477 (rdev->pdev->device == 0x990A) || 481 (rdev->pdev->device == 0x990A) ||
482 (rdev->pdev->device == 0x990D) ||
483 (rdev->pdev->device == 0x990E) ||
478 (rdev->pdev->device == 0x9913) || 484 (rdev->pdev->device == 0x9913) ||
479 (rdev->pdev->device == 0x9918)) { 485 (rdev->pdev->device == 0x9918)) {
480 rdev->config.cayman.max_simds_per_se = 4; 486 rdev->config.cayman.max_simds_per_se = 4;
@@ -483,6 +489,9 @@ static void cayman_gpu_init(struct radeon_device *rdev)
483 (rdev->pdev->device == 0x9990) || 489 (rdev->pdev->device == 0x9990) ||
484 (rdev->pdev->device == 0x9991) || 490 (rdev->pdev->device == 0x9991) ||
485 (rdev->pdev->device == 0x9994) || 491 (rdev->pdev->device == 0x9994) ||
492 (rdev->pdev->device == 0x9995) ||
493 (rdev->pdev->device == 0x9996) ||
494 (rdev->pdev->device == 0x999A) ||
486 (rdev->pdev->device == 0x99A0)) { 495 (rdev->pdev->device == 0x99A0)) {
487 rdev->config.cayman.max_simds_per_se = 3; 496 rdev->config.cayman.max_simds_per_se = 3;
488 rdev->config.cayman.max_backends_per_se = 1; 497 rdev->config.cayman.max_backends_per_se = 1;
@@ -616,11 +625,22 @@ static void cayman_gpu_init(struct radeon_device *rdev)
616 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); 625 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
617 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); 626 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
618 627
619 tmp = gb_addr_config & NUM_PIPES_MASK; 628 if ((rdev->config.cayman.max_backends_per_se == 1) &&
620 tmp = r6xx_remap_render_backend(rdev, tmp, 629 (rdev->flags & RADEON_IS_IGP)) {
621 rdev->config.cayman.max_backends_per_se * 630 if ((disabled_rb_mask & 3) == 1) {
622 rdev->config.cayman.max_shader_engines, 631 /* RB0 disabled, RB1 enabled */
623 CAYMAN_MAX_BACKENDS, disabled_rb_mask); 632 tmp = 0x11111111;
633 } else {
634 /* RB1 disabled, RB0 enabled */
635 tmp = 0x00000000;
636 }
637 } else {
638 tmp = gb_addr_config & NUM_PIPES_MASK;
639 tmp = r6xx_remap_render_backend(rdev, tmp,
640 rdev->config.cayman.max_backends_per_se *
641 rdev->config.cayman.max_shader_engines,
642 CAYMAN_MAX_BACKENDS, disabled_rb_mask);
643 }
624 WREG32(GB_BACKEND_MAP, tmp); 644 WREG32(GB_BACKEND_MAP, tmp);
625 645
626 cgts_tcc_disable = 0xffff0000; 646 cgts_tcc_disable = 0xffff0000;
@@ -1771,6 +1791,7 @@ int cayman_resume(struct radeon_device *rdev)
1771int cayman_suspend(struct radeon_device *rdev) 1791int cayman_suspend(struct radeon_device *rdev)
1772{ 1792{
1773 r600_audio_fini(rdev); 1793 r600_audio_fini(rdev);
1794 radeon_vm_manager_fini(rdev);
1774 cayman_cp_enable(rdev, false); 1795 cayman_cp_enable(rdev, false);
1775 cayman_dma_stop(rdev); 1796 cayman_dma_stop(rdev);
1776 evergreen_irq_suspend(rdev); 1797 evergreen_irq_suspend(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index bedda9caadd9..6e05a2e75a46 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -122,10 +122,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
122 goto out_cleanup; 122 goto out_cleanup;
123 } 123 }
124 124
125 /* r100 doesn't have dma engine so skip the test */ 125 if (rdev->asic->copy.dma) {
126 /* also, VRAM-to-VRAM test doesn't make much sense for DMA */
127 /* skip it as well if domains are the same */
128 if ((rdev->asic->copy.dma) && (sdomain != ddomain)) {
129 time = radeon_benchmark_do_move(rdev, size, saddr, daddr, 126 time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
130 RADEON_BENCHMARK_COPY_DMA, n); 127 RADEON_BENCHMARK_COPY_DMA, n);
131 if (time < 0) 128 if (time < 0)
@@ -135,13 +132,15 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
135 sdomain, ddomain, "dma"); 132 sdomain, ddomain, "dma");
136 } 133 }
137 134
138 time = radeon_benchmark_do_move(rdev, size, saddr, daddr, 135 if (rdev->asic->copy.blit) {
139 RADEON_BENCHMARK_COPY_BLIT, n); 136 time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
140 if (time < 0) 137 RADEON_BENCHMARK_COPY_BLIT, n);
141 goto out_cleanup; 138 if (time < 0)
142 if (time > 0) 139 goto out_cleanup;
143 radeon_benchmark_log_results(n, size, time, 140 if (time > 0)
144 sdomain, ddomain, "blit"); 141 radeon_benchmark_log_results(n, size, time,
142 sdomain, ddomain, "blit");
143 }
145 144
146out_cleanup: 145out_cleanup:
147 if (sobj) { 146 if (sobj) {
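
radeon_benchmark now gates each engine behind its capability pointer: the DMA pass runs only if rdev->asic->copy.dma exists, the blit pass only if copy.blit does (and the old same-domain skip for DMA is dropped). A small C model of that nullable-function-pointer pattern; the ops struct, callback, and size are invented for illustration:

    #include <stdio.h>

    struct copy_ops {
        int (*dma)(unsigned size);
        int (*blit)(unsigned size);
    };

    static int blit_copy(unsigned size)
    {
        printf("blit %u\n", size);
        return 0;
    }

    int main(void)
    {
        /* e.g. an r100-like part: no DMA engine, blit only */
        struct copy_ops ops = { .dma = NULL, .blit = blit_copy };

        if (ops.dma)
            ops.dma(1024);     /* skipped here: no such engine */
        if (ops.blit)
            ops.blit(1024);
        return 0;
    }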
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 9128120da044..bafbe3216952 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4469,6 +4469,7 @@ int si_resume(struct radeon_device *rdev)
4469 4469
4470int si_suspend(struct radeon_device *rdev) 4470int si_suspend(struct radeon_device *rdev)
4471{ 4471{
4472 radeon_vm_manager_fini(rdev);
4472 si_cp_enable(rdev, false); 4473 si_cp_enable(rdev, false);
4473 cayman_dma_stop(rdev); 4474 cayman_dma_stop(rdev);
4474 si_irq_suspend(rdev); 4475 si_irq_suspend(rdev);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 92e47e5c9564..c4388776f4e4 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -590,6 +590,9 @@
590#define USB_VENDOR_ID_MONTEREY 0x0566 590#define USB_VENDOR_ID_MONTEREY 0x0566
591#define USB_DEVICE_ID_GENIUS_KB29E 0x3004 591#define USB_DEVICE_ID_GENIUS_KB29E 0x3004
592 592
593#define USB_VENDOR_ID_MSI 0x1770
594#define USB_DEVICE_ID_MSI_GX680R_LED_PANEL 0xff00
595
593#define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400 596#define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400
594#define USB_DEVICE_ID_N_S_HARMONY 0xc359 597#define USB_DEVICE_ID_N_S_HARMONY 0xc359
595 598
@@ -684,6 +687,9 @@
684#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001 687#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
685#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008 688#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
686 689
690#define USB_VENDOR_ID_REALTEK 0x0bda
691#define USB_DEVICE_ID_REALTEK_READER 0x0152
692
687#define USB_VENDOR_ID_ROCCAT 0x1e7d 693#define USB_VENDOR_ID_ROCCAT 0x1e7d
688#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4 694#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
689#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c 695#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 7a1ebb867cf4..82e9211b3ca9 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -621,6 +621,7 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
621{ 621{
622 struct mt_device *td = hid_get_drvdata(hid); 622 struct mt_device *td = hid_get_drvdata(hid);
623 __s32 quirks = td->mtclass.quirks; 623 __s32 quirks = td->mtclass.quirks;
624 struct input_dev *input = field->hidinput->input;
624 625
625 if (hid->claimed & HID_CLAIMED_INPUT) { 626 if (hid->claimed & HID_CLAIMED_INPUT) {
626 switch (usage->hid) { 627 switch (usage->hid) {
@@ -670,13 +671,16 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
670 break; 671 break;
671 672
672 default: 673 default:
674 if (usage->type)
675 input_event(input, usage->type, usage->code,
676 value);
673 return; 677 return;
674 } 678 }
675 679
676 if (usage->usage_index + 1 == field->report_count) { 680 if (usage->usage_index + 1 == field->report_count) {
677 /* we only take into account the last report. */ 681 /* we only take into account the last report. */
678 if (usage->hid == td->last_slot_field) 682 if (usage->hid == td->last_slot_field)
679 mt_complete_slot(td, field->hidinput->input); 683 mt_complete_slot(td, input);
680 684
681 if (field->index == td->last_field_index 685 if (field->index == td->last_field_index
682 && td->num_received >= td->num_expected) 686 && td->num_received >= td->num_expected)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index e0e6abf1cd3b..19b8360f2330 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -73,6 +73,7 @@ static const struct hid_blacklist {
73 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 73 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
74 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 74 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
75 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 75 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
76 { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
76 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, 77 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
77 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, 78 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
78 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, 79 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
@@ -80,6 +81,7 @@ static const struct hid_blacklist {
80 { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET }, 81 { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
81 { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET }, 82 { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
82 { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET }, 83 { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
84 { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
83 { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET }, 85 { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
84 { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET }, 86 { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
85 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, 87 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h
index 668ff4721323..5cde94e56f17 100644
--- a/drivers/hwmon/lm75.h
+++ b/drivers/hwmon/lm75.h
@@ -25,7 +25,7 @@
25 which contains this code, we don't worry about the wasted space. 25 which contains this code, we don't worry about the wasted space.
26*/ 26*/
27 27
28#include <linux/hwmon.h> 28#include <linux/kernel.h>
29 29
30/* straight from the datasheet */ 30/* straight from the datasheet */
31#define LM75_TEMP_MIN (-55000) 31#define LM75_TEMP_MIN (-55000)
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 46cde098c11c..e380c6eef3af 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -4,7 +4,6 @@
4 4
5menuconfig I2C 5menuconfig I2C
6 tristate "I2C support" 6 tristate "I2C support"
7 depends on !S390
8 select RT_MUTEXES 7 select RT_MUTEXES
9 ---help--- 8 ---help---
10 I2C (pronounce: I-squared-C) is a slow serial bus protocol used in 9 I2C (pronounce: I-squared-C) is a slow serial bus protocol used in
@@ -76,6 +75,7 @@ config I2C_HELPER_AUTO
76 75
77config I2C_SMBUS 76config I2C_SMBUS
78 tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO 77 tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO
78 depends on GENERIC_HARDIRQS
79 help 79 help
80 Say Y here if you want support for SMBus extensions to the I2C 80 Say Y here if you want support for SMBus extensions to the I2C
81 specification. At the moment, the only supported extension is 81 specification. At the moment, the only supported extension is
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a3725de92384..adfee98486b1 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -114,7 +114,7 @@ config I2C_I801
114 114
115config I2C_ISCH 115config I2C_ISCH
116 tristate "Intel SCH SMBus 1.0" 116 tristate "Intel SCH SMBus 1.0"
117 depends on PCI 117 depends on PCI && GENERIC_HARDIRQS
118 select LPC_SCH 118 select LPC_SCH
119 help 119 help
120 Say Y here if you want to use SMBus controller on the Intel SCH 120 Say Y here if you want to use SMBus controller on the Intel SCH
@@ -543,6 +543,7 @@ config I2C_NUC900
543 543
544config I2C_OCORES 544config I2C_OCORES
545 tristate "OpenCores I2C Controller" 545 tristate "OpenCores I2C Controller"
546 depends on GENERIC_HARDIRQS
546 help 547 help
547 If you say yes to this option, support will be included for the 548 If you say yes to this option, support will be included for the
548 OpenCores I2C controller. For details see 549 OpenCores I2C controller. For details see
@@ -777,7 +778,7 @@ config I2C_DIOLAN_U2C
777 778
778config I2C_PARPORT 779config I2C_PARPORT
779 tristate "Parallel port adapter" 780 tristate "Parallel port adapter"
780 depends on PARPORT 781 depends on PARPORT && GENERIC_HARDIRQS
781 select I2C_ALGOBIT 782 select I2C_ALGOBIT
782 select I2C_SMBUS 783 select I2C_SMBUS
783 help 784 help
@@ -802,6 +803,7 @@ config I2C_PARPORT
802 803
803config I2C_PARPORT_LIGHT 804config I2C_PARPORT_LIGHT
804 tristate "Parallel port adapter (light)" 805 tristate "Parallel port adapter (light)"
806 depends on GENERIC_HARDIRQS
805 select I2C_ALGOBIT 807 select I2C_ALGOBIT
806 select I2C_SMBUS 808 select I2C_SMBUS
807 help 809 help
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index e9205ee8cf94..130f02cc9d94 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -80,6 +80,7 @@
80/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ 80/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
81#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 81#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
82#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a 82#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
83#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
83 84
84#define ISMT_DESC_ENTRIES 32 /* number of descriptor entries */ 85#define ISMT_DESC_ENTRIES 32 /* number of descriptor entries */
85#define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */ 86#define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */
@@ -185,6 +186,7 @@ struct ismt_priv {
185static const DEFINE_PCI_DEVICE_TABLE(ismt_ids) = { 186static const DEFINE_PCI_DEVICE_TABLE(ismt_ids) = {
186 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, 187 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
187 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, 188 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
189 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
188 { 0, } 190 { 0, }
189}; 191};
190 192
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 36704e3ab3fa..b714776b6ddd 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -411,7 +411,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
411 int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE; 411 int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE;
412 u32 clk_divisor; 412 u32 clk_divisor;
413 413
414 tegra_i2c_clock_enable(i2c_dev); 414 err = tegra_i2c_clock_enable(i2c_dev);
415 if (err < 0) {
416 dev_err(i2c_dev->dev, "Clock enable failed %d\n", err);
417 return err;
418 }
415 419
416 tegra_periph_reset_assert(i2c_dev->div_clk); 420 tegra_periph_reset_assert(i2c_dev->div_clk);
417 udelay(2); 421 udelay(2);
@@ -628,7 +632,12 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
628 if (i2c_dev->is_suspended) 632 if (i2c_dev->is_suspended)
629 return -EBUSY; 633 return -EBUSY;
630 634
631 tegra_i2c_clock_enable(i2c_dev); 635 ret = tegra_i2c_clock_enable(i2c_dev);
636 if (ret < 0) {
637 dev_err(i2c_dev->dev, "Clock enable failed %d\n", ret);
638 return ret;
639 }
640
632 for (i = 0; i < num; i++) { 641 for (i = 0; i < num; i++) {
633 enum msg_end_type end_type = MSG_END_STOP; 642 enum msg_end_type end_type = MSG_END_STOP;
634 if (i < (num - 1)) { 643 if (i < (num - 1)) {
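
Both tegra-i2c call sites stop ignoring tegra_i2c_clock_enable()'s return value: on failure they log the error and bail out before touching the controller. The shape of that fix, sketched with a stand-in clock_enable() whose failure can be forced:

    #include <errno.h>
    #include <stdio.h>

    static int clock_enable(int fail) { return fail ? -EIO : 0; }

    static int xfer(int fail_clock)
    {
        int ret = clock_enable(fail_clock);
        if (ret < 0) {
            fprintf(stderr, "Clock enable failed %d\n", ret);
            return ret;         /* bail out before touching hardware */
        }

        puts("transfer runs with clocks on");
        return 0;
    }

    int main(void)
    {
        xfer(0);
        xfer(1);
        return 0;
    }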
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index f3b8f9a6a89b..966a18a5d12d 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (c) 2010 Ericsson AB. 4 * Copyright (c) 2010 Ericsson AB.
5 * 5 *
6 * Author: Guenter Roeck <guenter.roeck@ericsson.com> 6 * Author: Guenter Roeck <linux@roeck-us.net>
7 * 7 *
8 * Derived from: 8 * Derived from:
9 * pca954x.c 9 * pca954x.c
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 565bfb161c1a..a3fde52840ca 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1575,6 +1575,12 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
1575 1575
1576 neigh = dst_neigh_lookup(ep->dst, 1576 neigh = dst_neigh_lookup(ep->dst,
1577 &ep->com.cm_id->remote_addr.sin_addr.s_addr); 1577 &ep->com.cm_id->remote_addr.sin_addr.s_addr);
1578 if (!neigh) {
1579 pr_err("%s - cannot alloc neigh.\n", __func__);
1580 err = -ENOMEM;
1581 goto fail4;
1582 }
1583
1578 /* get a l2t entry */ 1584 /* get a l2t entry */
1579 if (neigh->dev->flags & IFF_LOOPBACK) { 1585 if (neigh->dev->flags & IFF_LOOPBACK) {
1580 PDBG("%s LOOPBACK\n", __func__); 1586 PDBG("%s LOOPBACK\n", __func__);
@@ -3053,6 +3059,12 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3053 dst = &rt->dst; 3059 dst = &rt->dst;
3054 neigh = dst_neigh_lookup_skb(dst, skb); 3060 neigh = dst_neigh_lookup_skb(dst, skb);
3055 3061
3062 if (!neigh) {
3063 pr_err("%s - failed to allocate neigh!\n",
3064 __func__);
3065 goto free_dst;
3066 }
3067
3056 if (neigh->dev->flags & IFF_LOOPBACK) { 3068 if (neigh->dev->flags & IFF_LOOPBACK) {
3057 pdev = ip_dev_find(&init_net, iph->daddr); 3069 pdev = ip_dev_find(&init_net, iph->daddr);
3058 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 3070 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
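
Both cxgb4 hunks insert the same missing check: dst_neigh_lookup()/dst_neigh_lookup_skb() can return NULL, and the old code dereferenced the result unconditionally. A compact userspace rendering of the pattern, check, log, unwind, with lookup() standing in for the neighbour lookup:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct neigh { int flags; };

    static struct neigh *lookup(int should_fail)
    {
        return should_fail ? NULL : calloc(1, sizeof(struct neigh));
    }

    static int connect_path(int fail_lookup)
    {
        struct neigh *n = lookup(fail_lookup);

        if (!n) {
            fprintf(stderr, "%s - cannot alloc neigh.\n", __func__);
            return -ENOMEM;     /* unwind instead of crashing below */
        }

        printf("flags=%d\n", n->flags);  /* safe: n checked above */
        free(n);
        return 0;
    }

    int main(void)
    {
        connect_path(0);
        connect_path(1);
        return 0;
    }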
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 17ba4f8bc12d..70b1808a08f4 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -186,8 +186,10 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
186 wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), 186 wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
187 wq->rq.memsize, &(wq->rq.dma_addr), 187 wq->rq.memsize, &(wq->rq.dma_addr),
188 GFP_KERNEL); 188 GFP_KERNEL);
189 if (!wq->rq.queue) 189 if (!wq->rq.queue) {
190 ret = -ENOMEM;
190 goto free_sq; 191 goto free_sq;
192 }
191 PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n", 193 PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
192 __func__, wq->sq.queue, 194 __func__, wq->sq.queue,
193 (unsigned long long)virt_to_phys(wq->sq.queue), 195 (unsigned long long)virt_to_phys(wq->sq.queue),
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 439c35d4a669..ea93870266eb 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -620,7 +620,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
620 goto bail; 620 goto bail;
621 } 621 }
622 622
623 opcode = be32_to_cpu(ohdr->bth[0]) >> 24; 623 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
624 dev->opstats[opcode].n_bytes += tlen; 624 dev->opstats[opcode].n_bytes += tlen;
625 dev->opstats[opcode].n_packets++; 625 dev->opstats[opcode].n_packets++;
626 626
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
index 8349f9c5064c..1e603a375069 100644
--- a/drivers/infiniband/hw/qib/Kconfig
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -1,7 +1,7 @@
1config INFINIBAND_QIB 1config INFINIBAND_QIB
2 tristate "QLogic PCIe HCA support" 2 tristate "Intel PCIe HCA support"
3 depends on 64BIT 3 depends on 64BIT
4 ---help--- 4 ---help---
5 This is a low-level driver for QLogic PCIe QLE InfiniBand host 5 This is a low-level driver for Intel PCIe QLE InfiniBand host
6 channel adapters. This driver does not support the QLogic 6 channel adapters. This driver does not support the Intel
7 HyperTransport card (model QHT7140). 7 HyperTransport card (model QHT7140).
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 5423edcab51f..216092477dfc 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013 Intel Corporation. All rights reserved.
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. 3 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 5 *
@@ -63,8 +64,8 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
63 "Attempt pre-IBTA 1.2 DDR speed negotiation"); 64 "Attempt pre-IBTA 1.2 DDR speed negotiation");
64 65
65MODULE_LICENSE("Dual BSD/GPL"); 66MODULE_LICENSE("Dual BSD/GPL");
66MODULE_AUTHOR("QLogic <support@qlogic.com>"); 67MODULE_AUTHOR("Intel <ibsupport@intel.com>");
67MODULE_DESCRIPTION("QLogic IB driver"); 68MODULE_DESCRIPTION("Intel IB driver");
68MODULE_VERSION(QIB_DRIVER_VERSION); 69MODULE_VERSION(QIB_DRIVER_VERSION);
69 70
70/* 71/*
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a099ac171e22..0232ae56b1fa 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013 Intel Corporation. All rights reserved.
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. 3 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved. 4 * All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 5 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
@@ -51,7 +52,7 @@ static u32 qib_6120_iblink_state(u64);
51 52
52/* 53/*
53 * This file contains all the chip-specific register information and 54 * This file contains all the chip-specific register information and
54 * access functions for the QLogic QLogic_IB PCI-Express chip. 55 * access functions for the Intel Intel_IB PCI-Express chip.
55 * 56 *
56 */ 57 */
57 58
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 50e33aa0b4e3..173f805790da 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2012 Intel Corporation. All rights reserved. 2 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. 3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 * 5 *
@@ -1138,7 +1138,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
1138static void qib_remove_one(struct pci_dev *); 1138static void qib_remove_one(struct pci_dev *);
1139static int qib_init_one(struct pci_dev *, const struct pci_device_id *); 1139static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
1140 1140
1141#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: " 1141#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
1142#define PFX QIB_DRV_NAME ": " 1142#define PFX QIB_DRV_NAME ": "
1143 1143
1144static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = { 1144static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
@@ -1355,7 +1355,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1355 dd = qib_init_iba6120_funcs(pdev, ent); 1355 dd = qib_init_iba6120_funcs(pdev, ent);
1356#else 1356#else
1357 qib_early_err(&pdev->dev, 1357 qib_early_err(&pdev->dev,
1358 "QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n", 1358 "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
1359 ent->device); 1359 ent->device);
1360 dd = ERR_PTR(-ENODEV); 1360 dd = ERR_PTR(-ENODEV);
1361#endif 1361#endif
@@ -1371,7 +1371,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1371 1371
1372 default: 1372 default:
1373 qib_early_err(&pdev->dev, 1373 qib_early_err(&pdev->dev,
1374 "Failing on unknown QLogic deviceid 0x%x\n", 1374 "Failing on unknown Intel deviceid 0x%x\n",
1375 ent->device); 1375 ent->device);
1376 ret = -ENODEV; 1376 ret = -ENODEV;
1377 } 1377 }
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 50a8a0d4fe67..08a6c6d39e56 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2012 Intel Corporation. All rights reserved. 2 * Copyright (c) 2013 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. 3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 * 5 *
@@ -44,7 +44,7 @@
44#include "qib.h" 44#include "qib.h"
45#include "qib_7220.h" 45#include "qib_7220.h"
46 46
47#define SD7220_FW_NAME "qlogic/sd7220.fw" 47#define SD7220_FW_NAME "intel/sd7220.fw"
48MODULE_FIRMWARE(SD7220_FW_NAME); 48MODULE_FIRMWARE(SD7220_FW_NAME);
49 49
50/* 50/*
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index ba51a4715a1d..7c0ab16a2fe2 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2012 Intel Corporation. All rights reserved. 2 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. 3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 * 5 *
@@ -2224,7 +2224,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
2224 ibdev->dma_ops = &qib_dma_mapping_ops; 2224 ibdev->dma_ops = &qib_dma_mapping_ops;
2225 2225
2226 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), 2226 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
2227 "QLogic Infiniband HCA %s", init_utsname()->nodename); 2227 "Intel Infiniband HCA %s", init_utsname()->nodename);
2228 2228
2229 ret = ib_register_device(ibdev, qib_create_port_files); 2229 ret = ib_register_device(ibdev, qib_create_port_files);
2230 if (ret) 2230 if (ret)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 67b0c1d23678..1ef880de3a41 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -758,9 +758,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
758 if (++priv->tx_outstanding == ipoib_sendq_size) { 758 if (++priv->tx_outstanding == ipoib_sendq_size) {
759 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", 759 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
760 tx->qp->qp_num); 760 tx->qp->qp_num);
761 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
762 ipoib_warn(priv, "request notify on send CQ failed\n");
763 netif_stop_queue(dev); 761 netif_stop_queue(dev);
762 rc = ib_req_notify_cq(priv->send_cq,
763 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
764 if (rc < 0)
765 ipoib_warn(priv, "request notify on send CQ failed\n");
766 else if (rc)
767 ipoib_send_comp_handler(priv->send_cq, dev);
764 } 768 }
765 } 769 }
766} 770}
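
The ipoib change reorders the ring-full path: stop the queue first, then arm the CQ with IB_CQ_REPORT_MISSED_EVENTS; a positive return means completions already slipped past the arm, so the handler is invoked directly rather than leaving the stopped queue waiting for an interrupt that never fires. A loose model of that return-code convention only (no real verbs API here; all names are stand-ins):

    #include <stdio.h>

    #define CQ_MISSED 1

    /* <0 = error, >0 = events were missed while arming */
    static int req_notify(int missed) { return missed; }

    static void comp_handler(void) { puts("drain completions, wake queue"); }

    static void ring_full(int missed)
    {
        int rc;

        puts("netif_stop_queue");
        rc = req_notify(missed);
        if (rc < 0)
            fprintf(stderr, "request notify on send CQ failed\n");
        else if (rc)
            comp_handler();      /* events raced past the arm */
    }

    int main(void)
    {
        ring_full(0);
        ring_full(CQ_MISSED);
        return 0;
    }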
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 7cd74e29cbc8..9135606c8649 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -158,14 +158,10 @@ static unsigned int get_time_pit(void)
158#define GET_TIME(x) rdtscl(x) 158#define GET_TIME(x) rdtscl(x)
159#define DELTA(x,y) ((y)-(x)) 159#define DELTA(x,y) ((y)-(x))
160#define TIME_NAME "TSC" 160#define TIME_NAME "TSC"
161#elif defined(__alpha__) 161#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_TILE)
162#define GET_TIME(x) do { x = get_cycles(); } while (0) 162#define GET_TIME(x) do { x = get_cycles(); } while (0)
163#define DELTA(x,y) ((y)-(x)) 163#define DELTA(x,y) ((y)-(x))
164#define TIME_NAME "PCC" 164#define TIME_NAME "get_cycles"
165#elif defined(CONFIG_MN10300) || defined(CONFIG_TILE)
166#define GET_TIME(x) do { x = get_cycles(); } while (0)
167#define DELTA(x, y) ((x) - (y))
168#define TIME_NAME "TSC"
169#else 165#else
170#define FAKE_TIME 166#define FAKE_TIME
171static unsigned long analog_faketime = 0; 167static unsigned long analog_faketime = 0;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 5c514d0711d1..c332fb98480d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -130,7 +130,7 @@ config IRQ_REMAP
130# OMAP IOMMU support 130# OMAP IOMMU support
131config OMAP_IOMMU 131config OMAP_IOMMU
132 bool "OMAP IOMMU Support" 132 bool "OMAP IOMMU Support"
133 depends on ARCH_OMAP 133 depends on ARCH_OMAP2PLUS
134 select IOMMU_API 134 select IOMMU_API
135 135
136config OMAP_IOVMM 136config OMAP_IOVMM
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 98f555dafb55..b287ca33833d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2466,18 +2466,16 @@ static int device_change_notifier(struct notifier_block *nb,
2466 2466
2467 /* allocate a protection domain if a device is added */ 2467 /* allocate a protection domain if a device is added */
2468 dma_domain = find_protection_domain(devid); 2468 dma_domain = find_protection_domain(devid);
2469 if (dma_domain) 2469 if (!dma_domain) {
2470 goto out; 2470 dma_domain = dma_ops_domain_alloc();
2471 dma_domain = dma_ops_domain_alloc(); 2471 if (!dma_domain)
2472 if (!dma_domain) 2472 goto out;
2473 goto out; 2473 dma_domain->target_dev = devid;
2474 dma_domain->target_dev = devid; 2474
2475 2475 spin_lock_irqsave(&iommu_pd_list_lock, flags);
2476 spin_lock_irqsave(&iommu_pd_list_lock, flags); 2476 list_add_tail(&dma_domain->list, &iommu_pd_list);
2477 list_add_tail(&dma_domain->list, &iommu_pd_list); 2477 spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
2478 spin_unlock_irqrestore(&iommu_pd_list_lock, flags); 2478 }
2479
2480 dev_data = get_dev_data(dev);
2481 2479
2482 dev->archdata.dma_ops = &amd_iommu_dma_ops; 2480 dev->archdata.dma_ops = &amd_iommu_dma_ops;
2483 2481
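
The amd_iommu notifier rewrite folds the "allocate a protection domain if the lookup misses" logic into a guarded block, so the dma_ops assignment below it now runs whether or not a domain already existed (the old gotos skipped it on a hit). The same shape in plain C, with stand-in types and an always-missing lookup:

    #include <stdio.h>
    #include <stdlib.h>

    struct domain { int devid; };

    static struct domain *find_domain(int devid)
    {
        (void)devid;
        return NULL;            /* simulate a lookup miss */
    }

    static struct domain *get_or_alloc_domain(int devid)
    {
        struct domain *d = find_domain(devid);

        if (!d) {
            d = calloc(1, sizeof(*d));
            if (!d)
                return NULL;
            d->devid = devid;
            /* list_add_tail(...) would publish it here */
        }
        return d;               /* shared tail: runs on hit and miss */
    }

    int main(void)
    {
        struct domain *d = get_or_alloc_domain(7);
        if (d)
            printf("domain for devid %d ready\n", d->devid);
        free(d);
        return 0;
    }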
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index b6ecddb63cd0..e3c2d74b7684 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -980,7 +980,7 @@ static void __init free_iommu_all(void)
 980 * BIOS should disable L2B miscellaneous clock gating by setting 980 * BIOS should disable L2B miscellaneous clock gating by setting
981 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b 981 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
982 */ 982 */
983static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) 983static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
984{ 984{
985 u32 value; 985 u32 value;
986 986
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index d56f8c17c5fe..7c11ff368d07 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -2,7 +2,6 @@
2#include <linux/cpumask.h> 2#include <linux/cpumask.h>
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/string.h> 4#include <linux/string.h>
5#include <linux/cpumask.h>
6#include <linux/errno.h> 5#include <linux/errno.h>
7#include <linux/msi.h> 6#include <linux/msi.h>
8#include <linux/irq.h> 7#include <linux/irq.h>
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
index 5313c9ea44dc..d9edcc94c2a8 100644
--- a/drivers/isdn/hisax/Kconfig
+++ b/drivers/isdn/hisax/Kconfig
@@ -237,7 +237,8 @@ config HISAX_MIC
237 237
238config HISAX_NETJET 238config HISAX_NETJET
239 bool "NETjet card" 239 bool "NETjet card"
240 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) 240 depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
241 depends on VIRT_TO_BUS
241 help 242 help
242 This enables HiSax support for the NetJet from Traverse 243 This enables HiSax support for the NetJet from Traverse
243 Technologies. 244 Technologies.
@@ -248,7 +249,8 @@ config HISAX_NETJET
248 249
249config HISAX_NETJET_U 250config HISAX_NETJET_U
250 bool "NETspider U card" 251 bool "NETspider U card"
251 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) 252 depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
253 depends on VIRT_TO_BUS
252 help 254 help
253 This enables HiSax support for the Netspider U interface ISDN card 255 This enables HiSax support for the Netspider U interface ISDN card
254 from Traverse Technologies. 256 from Traverse Technologies.
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 3c955e10a618..c6083132c4b8 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1025,6 +1025,8 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
1025{ 1025{
1026 struct blk_plug plug; 1026 struct blk_plug plug;
1027 1027
1028 BUG_ON(dm_bufio_in_request());
1029
1028 blk_start_plug(&plug); 1030 blk_start_plug(&plug);
1029 dm_bufio_lock(c); 1031 dm_bufio_lock(c);
1030 1032
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index fbd3625f2748..83e995fece88 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -83,6 +83,8 @@ struct cache_disk_superblock {
 	__le32 read_misses;
 	__le32 write_hits;
 	__le32 write_misses;
+
+	__le32 policy_version[CACHE_POLICY_VERSION_SIZE];
 } __packed;
 
 struct dm_cache_metadata {
@@ -109,6 +111,7 @@ struct dm_cache_metadata {
 	bool clean_when_opened:1;
 
 	char policy_name[CACHE_POLICY_NAME_SIZE];
+	unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
 	size_t policy_hint_size;
 	struct dm_cache_statistics stats;
 };
@@ -268,7 +271,8 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
 	memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
 	disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
 	disk_super->version = cpu_to_le32(CACHE_VERSION);
-	memset(disk_super->policy_name, 0, CACHE_POLICY_NAME_SIZE);
+	memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
+	memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
 	disk_super->policy_hint_size = 0;
 
 	r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
@@ -284,7 +288,6 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
 	disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
 	disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
 	disk_super->cache_blocks = cpu_to_le32(0);
-	memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
 
 	disk_super->read_hits = cpu_to_le32(0);
 	disk_super->read_misses = cpu_to_le32(0);
@@ -478,6 +481,9 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd,
 	cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
 	cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
 	strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
+	cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
+	cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
+	cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
 	cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
 
 	cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
@@ -572,6 +578,9 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
 	disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
 	disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
 	strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
+	disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
+	disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
+	disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
 
 	disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
 	disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
@@ -854,18 +863,43 @@ struct thunk {
 	bool hints_valid;
 };
 
+static bool policy_unchanged(struct dm_cache_metadata *cmd,
+			     struct dm_cache_policy *policy)
+{
+	const char *policy_name = dm_cache_policy_get_name(policy);
+	const unsigned *policy_version = dm_cache_policy_get_version(policy);
+	size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
+
+	/*
+	 * Ensure policy names match.
+	 */
+	if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name)))
+		return false;
+
+	/*
+	 * Ensure policy major versions match.
+	 */
+	if (cmd->policy_version[0] != policy_version[0])
+		return false;
+
+	/*
+	 * Ensure policy hint sizes match.
+	 */
+	if (cmd->policy_hint_size != policy_hint_size)
+		return false;
+
+	return true;
+}
+
 static bool hints_array_initialized(struct dm_cache_metadata *cmd)
 {
 	return cmd->hint_root && cmd->policy_hint_size;
 }
 
 static bool hints_array_available(struct dm_cache_metadata *cmd,
-				  const char *policy_name)
+				  struct dm_cache_policy *policy)
 {
-	bool policy_names_match = !strncmp(cmd->policy_name, policy_name,
-					   sizeof(cmd->policy_name));
-
-	return cmd->clean_when_opened && policy_names_match &&
+	return cmd->clean_when_opened && policy_unchanged(cmd, policy) &&
 		hints_array_initialized(cmd);
 }
 
@@ -899,7 +933,8 @@ static int __load_mapping(void *context, uint64_t cblock, void *leaf)
 		return r;
 }
 
-static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
+static int __load_mappings(struct dm_cache_metadata *cmd,
+			   struct dm_cache_policy *policy,
 			   load_mapping_fn fn, void *context)
 {
 	struct thunk thunk;
@@ -909,18 +944,19 @@ static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_nam
 
 	thunk.cmd = cmd;
 	thunk.respect_dirty_flags = cmd->clean_when_opened;
-	thunk.hints_valid = hints_array_available(cmd, policy_name);
+	thunk.hints_valid = hints_array_available(cmd, policy);
 
 	return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk);
 }
 
-int dm_cache_load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
+int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
+			   struct dm_cache_policy *policy,
 			   load_mapping_fn fn, void *context)
{
 	int r;
 
 	down_read(&cmd->root_lock);
-	r = __load_mappings(cmd, policy_name, fn, context);
+	r = __load_mappings(cmd, policy, fn, context);
 	up_read(&cmd->root_lock);
 
 	return r;
@@ -979,7 +1015,7 @@ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty
 		/* nothing to be done */
 		return 0;
 
-	value = pack_value(oblock, flags | (dirty ? M_DIRTY : 0));
+	value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0));
 	__dm_bless_for_disk(&value);
 
 	r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
@@ -1070,13 +1106,15 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
 	__le32 value;
 	size_t hint_size;
 	const char *policy_name = dm_cache_policy_get_name(policy);
+	const unsigned *policy_version = dm_cache_policy_get_version(policy);
 
 	if (!policy_name[0] ||
 	    (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
 		return -EINVAL;
 
-	if (strcmp(cmd->policy_name, policy_name)) {
+	if (!policy_unchanged(cmd, policy)) {
 		strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
+		memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
 
 		hint_size = dm_cache_policy_get_hint_size(policy);
 		if (!hint_size)
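
The hint-reuse logic above distills to a small predicate: hints persisted by a policy are only trusted again if the policy name, its major version number, and its per-block hint size all still match. Below is a minimal userspace C sketch of that check; the struct layout, helper, and constant names are illustrative stand-ins, not the kernel's own definitions.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define POLICY_NAME_SIZE 16
#define POLICY_VERSION_SIZE 3

struct saved_policy {			/* what the superblock remembers */
	char name[POLICY_NAME_SIZE];
	unsigned version[POLICY_VERSION_SIZE];
	size_t hint_size;
};

/* On-disk hints are only reusable if name, major version and hint size agree. */
static bool policy_unchanged(const struct saved_policy *saved,
			     const char *name, const unsigned *version,
			     size_t hint_size)
{
	if (strncmp(saved->name, name, POLICY_NAME_SIZE))
		return false;
	if (saved->version[0] != version[0])	/* major version only */
		return false;
	if (saved->hint_size != hint_size)
		return false;
	return true;
}

int main(void)
{
	struct saved_policy saved = { "mq", {1, 0, 0}, 4 };
	unsigned newer_minor[POLICY_VERSION_SIZE] = {1, 1, 0};
	unsigned new_major[POLICY_VERSION_SIZE] = {2, 0, 0};

	printf("minor bump keeps hints: %d\n",
	       policy_unchanged(&saved, "mq", newer_minor, 4));	/* prints 1 */
	printf("major bump drops hints: %d\n",
	       policy_unchanged(&saved, "mq", new_major, 4));	/* prints 0 */
	return 0;
}
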
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index 135864ea0eee..f45cef21f3d0 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -89,7 +89,7 @@ typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
 			       dm_cblock_t cblock, bool dirty,
 			       uint32_t hint, bool hint_valid);
 int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
-			   const char *policy_name,
+			   struct dm_cache_policy *policy,
 			   load_mapping_fn fn,
 			   void *context);
 
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
index cc05d70b3cb8..b04d1f904d07 100644
--- a/drivers/md/dm-cache-policy-cleaner.c
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -17,7 +17,6 @@
 /*----------------------------------------------------------------*/
 
 #define DM_MSG_PREFIX "cache cleaner"
-#define CLEANER_VERSION "1.0.0"
 
 /* Cache entry struct. */
 struct wb_cache_entry {
@@ -434,6 +433,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
 
 static struct dm_cache_policy_type wb_policy_type = {
 	.name = "cleaner",
+	.version = {1, 0, 0},
 	.hint_size = 0,
 	.owner = THIS_MODULE,
 	.create = wb_create
@@ -446,7 +446,10 @@ static int __init wb_init(void)
 	if (r < 0)
 		DMERR("register failed %d", r);
 	else
-		DMINFO("version " CLEANER_VERSION " loaded");
+		DMINFO("version %u.%u.%u loaded",
+		       wb_policy_type.version[0],
+		       wb_policy_type.version[1],
+		       wb_policy_type.version[2]);
 
 	return r;
 }
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
index 52a75beeced5..0928abdc49f0 100644
--- a/drivers/md/dm-cache-policy-internal.h
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -117,6 +117,8 @@ void dm_cache_policy_destroy(struct dm_cache_policy *p);
  */
 const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
 
+const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p);
+
 size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
 
 /*----------------------------------------------------------------*/
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 964153255076..dc112a7137fe 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -14,7 +14,6 @@
 #include <linux/vmalloc.h>
 
 #define DM_MSG_PREFIX "cache-policy-mq"
-#define MQ_VERSION "1.0.0"
 
 static struct kmem_cache *mq_entry_cache;
 
@@ -1133,6 +1132,7 @@ bad_cache_alloc:
 
 static struct dm_cache_policy_type mq_policy_type = {
 	.name = "mq",
+	.version = {1, 0, 0},
 	.hint_size = 4,
 	.owner = THIS_MODULE,
 	.create = mq_create
@@ -1140,6 +1140,7 @@ static struct dm_cache_policy_type mq_policy_type = {
 
 static struct dm_cache_policy_type default_policy_type = {
 	.name = "default",
+	.version = {1, 0, 0},
 	.hint_size = 4,
 	.owner = THIS_MODULE,
 	.create = mq_create
@@ -1164,7 +1165,10 @@ static int __init mq_init(void)
 
 	r = dm_cache_policy_register(&default_policy_type);
 	if (!r) {
-		DMINFO("version " MQ_VERSION " loaded");
+		DMINFO("version %u.%u.%u loaded",
+		       mq_policy_type.version[0],
+		       mq_policy_type.version[1],
+		       mq_policy_type.version[2]);
 		return 0;
 	}
 
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
index 2cbf5fdaac52..21c03c570c06 100644
--- a/drivers/md/dm-cache-policy.c
+++ b/drivers/md/dm-cache-policy.c
@@ -150,6 +150,14 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
150} 150}
151EXPORT_SYMBOL_GPL(dm_cache_policy_get_name); 151EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);
152 152
153const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p)
154{
155 struct dm_cache_policy_type *t = p->private;
156
157 return t->version;
158}
159EXPORT_SYMBOL_GPL(dm_cache_policy_get_version);
160
153size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p) 161size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p)
154{ 162{
155 struct dm_cache_policy_type *t = p->private; 163 struct dm_cache_policy_type *t = p->private;
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index f0f51b260544..558bdfdabf5f 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -196,6 +196,7 @@ struct dm_cache_policy {
  * We maintain a little register of the different policy types.
  */
 #define CACHE_POLICY_NAME_SIZE 16
+#define CACHE_POLICY_VERSION_SIZE 3
 
 struct dm_cache_policy_type {
 	/* For use by the register code only. */
@@ -206,6 +207,7 @@ struct dm_cache_policy_type {
 	 * what gets passed on the target line to select your policy.
 	 */
 	char name[CACHE_POLICY_NAME_SIZE];
+	unsigned version[CACHE_POLICY_VERSION_SIZE];
 
 	/*
 	 * Policies may store a hint for each each cache block.
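
For illustration, here is a hedged userspace sketch of the registration pattern this header change introduces: a policy type carries a three-element version array filled in with a designated initializer, and the module reports it with the same %u.%u.%u idiom the DMINFO() calls above switched to. The struct and variable names are stand-ins, not the kernel's.

#include <stddef.h>
#include <stdio.h>

#define CACHE_POLICY_NAME_SIZE 16
#define CACHE_POLICY_VERSION_SIZE 3

struct policy_type {
	char name[CACHE_POLICY_NAME_SIZE];
	unsigned version[CACHE_POLICY_VERSION_SIZE];
	size_t hint_size;
};

static struct policy_type mq_policy_type = {
	.name = "mq",
	.version = {1, 0, 0},	/* {major, minor, patch} */
	.hint_size = 4,
};

int main(void)
{
	/* Same formatting idiom the patch switches DMINFO() to. */
	printf("version %u.%u.%u loaded\n",
	       mq_policy_type.version[0],
	       mq_policy_type.version[1],
	       mq_policy_type.version[2]);
	return 0;
}
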
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0f4e84b15c30..66120bd46d15 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -142,6 +142,7 @@ struct cache {
 	spinlock_t lock;
 	struct bio_list deferred_bios;
 	struct bio_list deferred_flush_bios;
+	struct bio_list deferred_writethrough_bios;
 	struct list_head quiesced_migrations;
 	struct list_head completed_migrations;
 	struct list_head need_commit_migrations;
@@ -158,7 +159,7 @@ struct cache {
 	/*
 	 * origin_blocks entries, discarded if set.
 	 */
-	sector_t discard_block_size; /* a power of 2 times sectors per block */
+	uint32_t discard_block_size; /* a power of 2 times sectors per block */
 	dm_dblock_t discard_nr_blocks;
 	unsigned long *discard_bitset;
 
@@ -199,6 +200,11 @@ struct per_bio_data {
 	bool tick:1;
 	unsigned req_nr:2;
 	struct dm_deferred_entry *all_io_entry;
+
+	/* writethrough fields */
+	struct cache *cache;
+	dm_cblock_t cblock;
+	bio_end_io_t *saved_bi_end_io;
 };
 
 struct dm_cache_migration {
@@ -412,17 +418,24 @@ static bool block_size_is_power_of_two(struct cache *cache)
 	return cache->sectors_per_block_shift >= 0;
 }
 
+static dm_block_t block_div(dm_block_t b, uint32_t n)
+{
+	do_div(b, n);
+
+	return b;
+}
+
 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
 {
-	sector_t discard_blocks = cache->discard_block_size;
+	uint32_t discard_blocks = cache->discard_block_size;
 	dm_block_t b = from_oblock(oblock);
 
 	if (!block_size_is_power_of_two(cache))
-		(void) sector_div(discard_blocks, cache->sectors_per_block);
+		discard_blocks = discard_blocks / cache->sectors_per_block;
 	else
 		discard_blocks >>= cache->sectors_per_block_shift;
 
-	(void) sector_div(b, discard_blocks);
+	b = block_div(b, discard_blocks);
 
 	return to_dblock(b);
 }
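
block_div() exists because dm_block_t is 64 bits wide while the discard block size is now a 32-bit count, and the kernel's do_div() divides in place and returns the remainder rather than the quotient. A minimal userspace model follows, assuming plain 64-bit division stands in for do_div(); the sample values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Wrapper so callers get the quotient back, mirroring the helper above. */
static uint64_t block_div(uint64_t b, uint32_t n)
{
	b /= n;			/* do_div(b, n) in the kernel */
	return b;
}

int main(void)
{
	uint64_t origin_sectors = 1000000;	/* made-up device size */
	uint32_t block_size = 384;		/* sectors per block; not a power of 2 */

	printf("origin blocks: %llu\n",
	       (unsigned long long)block_div(origin_sectors, block_size));
	return 0;
}
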
@@ -609,6 +622,56 @@ static void issue(struct cache *cache, struct bio *bio)
 	spin_unlock_irqrestore(&cache->lock, flags);
 }
 
+static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cache->lock, flags);
+	bio_list_add(&cache->deferred_writethrough_bios, bio);
+	spin_unlock_irqrestore(&cache->lock, flags);
+
+	wake_worker(cache);
+}
+
+static void writethrough_endio(struct bio *bio, int err)
+{
+	struct per_bio_data *pb = get_per_bio_data(bio);
+	bio->bi_end_io = pb->saved_bi_end_io;
+
+	if (err) {
+		bio_endio(bio, err);
+		return;
+	}
+
+	remap_to_cache(pb->cache, bio, pb->cblock);
+
+	/*
+	 * We can't issue this bio directly, since we're in interrupt
+	 * context.  So it gets put on a bio list for processing by the
+	 * worker thread.
+	 */
+	defer_writethrough_bio(pb->cache, bio);
+}
+
+/*
+ * When running in writethrough mode we need to send writes to clean blocks
+ * to both the cache and origin devices.  In future we'd like to clone the
+ * bio and send them in parallel, but for now we're doing them in
+ * series as this is easier.
+ */
+static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
+				       dm_oblock_t oblock, dm_cblock_t cblock)
+{
+	struct per_bio_data *pb = get_per_bio_data(bio);
+
+	pb->cache = cache;
+	pb->cblock = cblock;
+	pb->saved_bi_end_io = bio->bi_end_io;
+	bio->bi_end_io = writethrough_endio;
+
+	remap_to_origin_clear_discard(pb->cache, bio, oblock);
+}
+
 /*----------------------------------------------------------------
  * Migration processing
  *
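
The pattern above, where an endio handler that may run in interrupt context pushes the bio onto a spinlock-protected list and wakes a worker to reissue it, can be modelled in userspace. A hedged sketch follows, with pthreads standing in for the kernel's spinlock and workqueue; every name in it is illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct bio {			/* toy stand-in for struct bio */
	int id;
	struct bio *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t more = PTHREAD_COND_INITIALIZER;
static struct bio *deferred;	/* deferred_writethrough_bios analogue */
static int done;

static void defer_bio(struct bio *b)	/* "endio" side: just queue and wake */
{
	pthread_mutex_lock(&lock);
	b->next = deferred;
	deferred = b;
	pthread_cond_signal(&more);	/* wake_worker() analogue */
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)	/* process_deferred_writethrough_bios() analogue */
{
	(void)arg;
	for (;;) {
		struct bio *list;

		pthread_mutex_lock(&lock);
		while (!deferred && !done)
			pthread_cond_wait(&more, &lock);
		list = deferred;	/* take the whole list in one go */
		deferred = NULL;
		pthread_mutex_unlock(&lock);

		if (!list && done)
			return NULL;
		while (list) {
			struct bio *b = list;
			list = list->next;
			printf("reissuing bio %d\n", b->id);	/* generic_make_request() */
			free(b);
		}
	}
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, worker, NULL);
	for (int i = 0; i < 3; i++) {
		struct bio *b = malloc(sizeof(*b));
		b->id = i;
		defer_bio(b);
	}
	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_signal(&more);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
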
@@ -1002,7 +1065,7 @@ static void process_discard_bio(struct cache *cache, struct bio *bio)
 	dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
 	dm_block_t b;
 
-	(void) sector_div(end_block, cache->discard_block_size);
+	end_block = block_div(end_block, cache->discard_block_size);
 
 	for (b = start_block; b < end_block; b++)
 		set_discard(cache, to_dblock(b));
@@ -1070,14 +1133,9 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 		inc_hit_counter(cache, bio);
 		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 
-		if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
-			/*
-			 * No need to mark anything dirty in write through mode.
-			 */
-			pb->req_nr == 0 ?
-				remap_to_cache(cache, bio, lookup_result.cblock) :
-				remap_to_origin_clear_discard(cache, bio, block);
-		} else
+		if (is_writethrough_io(cache, bio, lookup_result.cblock))
+			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+		else
 			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
 
 		issue(cache, bio);
@@ -1086,17 +1144,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 	case POLICY_MISS:
 		inc_miss_counter(cache, bio);
 		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
-
-		if (pb->req_nr != 0) {
-			/*
-			 * This is a duplicate writethrough io that is no
-			 * longer needed because the block has been demoted.
-			 */
-			bio_endio(bio, 0);
-		} else {
-			remap_to_origin_clear_discard(cache, bio, block);
-			issue(cache, bio);
-		}
+		remap_to_origin_clear_discard(cache, bio, block);
+		issue(cache, bio);
 		break;
 
 	case POLICY_NEW:
@@ -1217,6 +1266,23 @@ static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
 		submit_bios ? generic_make_request(bio) : bio_io_error(bio);
 }
 
+static void process_deferred_writethrough_bios(struct cache *cache)
+{
+	unsigned long flags;
+	struct bio_list bios;
+	struct bio *bio;
+
+	bio_list_init(&bios);
+
+	spin_lock_irqsave(&cache->lock, flags);
+	bio_list_merge(&bios, &cache->deferred_writethrough_bios);
+	bio_list_init(&cache->deferred_writethrough_bios);
+	spin_unlock_irqrestore(&cache->lock, flags);
+
+	while ((bio = bio_list_pop(&bios)))
+		generic_make_request(bio);
+}
+
 static void writeback_some_dirty_blocks(struct cache *cache)
 {
 	int r = 0;
@@ -1313,6 +1379,7 @@ static int more_work(struct cache *cache)
 	else
 		return !bio_list_empty(&cache->deferred_bios) ||
 			!bio_list_empty(&cache->deferred_flush_bios) ||
+			!bio_list_empty(&cache->deferred_writethrough_bios) ||
 			!list_empty(&cache->quiesced_migrations) ||
 			!list_empty(&cache->completed_migrations) ||
 			!list_empty(&cache->need_commit_migrations);
@@ -1331,6 +1398,8 @@ static void do_worker(struct work_struct *ws)
 
 		writeback_some_dirty_blocks(cache);
 
+		process_deferred_writethrough_bios(cache);
+
 		if (commit_if_needed(cache)) {
 			process_deferred_flush_bios(cache, false);
 
@@ -1756,8 +1825,11 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
 	}
 
 	r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
-	if (r)
+	if (r) {
+		*error = "Error setting cache policy's config values";
 		dm_cache_policy_destroy(cache->policy);
+		cache->policy = NULL;
+	}
 
 	return r;
 }
@@ -1793,8 +1865,6 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,
 
 #define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
 
-static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio);
-
 static int cache_create(struct cache_args *ca, struct cache **result)
 {
 	int r = 0;
@@ -1821,9 +1891,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
 	memcpy(&cache->features, &ca->features, sizeof(cache->features));
 
-	if (cache->features.write_through)
-		ti->num_write_bios = cache_num_write_bios;
-
 	cache->callbacks.congested_fn = cache_is_congested;
 	dm_table_add_target_callbacks(ti->table, &cache->callbacks);
 
@@ -1835,7 +1902,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
 	/* FIXME: factor out this whole section */
 	origin_blocks = cache->origin_sectors = ca->origin_sectors;
-	(void) sector_div(origin_blocks, ca->block_size);
+	origin_blocks = block_div(origin_blocks, ca->block_size);
 	cache->origin_blocks = to_oblock(origin_blocks);
 
 	cache->sectors_per_block = ca->block_size;
@@ -1848,7 +1915,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 		dm_block_t cache_size = ca->cache_sectors;
 
 		cache->sectors_per_block_shift = -1;
-		(void) sector_div(cache_size, ca->block_size);
+		cache_size = block_div(cache_size, ca->block_size);
 		cache->cache_size = to_cblock(cache_size);
 	} else {
 		cache->sectors_per_block_shift = __ffs(ca->block_size);
@@ -1873,6 +1940,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	spin_lock_init(&cache->lock);
 	bio_list_init(&cache->deferred_bios);
 	bio_list_init(&cache->deferred_flush_bios);
+	bio_list_init(&cache->deferred_writethrough_bios);
 	INIT_LIST_HEAD(&cache->quiesced_migrations);
 	INIT_LIST_HEAD(&cache->completed_migrations);
 	INIT_LIST_HEAD(&cache->need_commit_migrations);
@@ -2002,6 +2070,8 @@ static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto out;
 
 	r = cache_create(ca, &cache);
+	if (r)
+		goto out;
 
 	r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
 	if (r) {
@@ -2016,20 +2086,6 @@ out:
 	return r;
 }
 
-static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio)
-{
-	int r;
-	struct cache *cache = ti->private;
-	dm_oblock_t block = get_bio_block(cache, bio);
-	dm_cblock_t cblock;
-
-	r = policy_lookup(cache->policy, block, &cblock);
-	if (r < 0)
-		return 2; /* assume the worst */
-
-	return (!r && !is_dirty(cache, cblock)) ? 2 : 1;
-}
-
 static int cache_map(struct dm_target *ti, struct bio *bio)
 {
 	struct cache *cache = ti->private;
@@ -2097,18 +2153,12 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 		inc_hit_counter(cache, bio);
 		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 
-		if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
-			/*
-			 * No need to mark anything dirty in write through mode.
-			 */
-			pb->req_nr == 0 ?
-				remap_to_cache(cache, bio, lookup_result.cblock) :
-				remap_to_origin_clear_discard(cache, bio, block);
-			cell_defer(cache, cell, false);
-		} else {
+		if (is_writethrough_io(cache, bio, lookup_result.cblock))
+			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+		else
 			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
-			cell_defer(cache, cell, false);
-		}
+
+		cell_defer(cache, cell, false);
 		break;
 
 	case POLICY_MISS:
@@ -2319,8 +2369,7 @@ static int cache_preresume(struct dm_target *ti)
 	}
 
 	if (!cache->loaded_mappings) {
-		r = dm_cache_load_mappings(cache->cmd,
-					   dm_cache_policy_get_name(cache->policy),
+		r = dm_cache_load_mappings(cache->cmd, cache->policy,
 					   load_mapping, cache);
 		if (r) {
 			DMERR("could not load cache mappings");
@@ -2535,7 +2584,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type cache_target = {
 	.name = "cache",
-	.version = {1, 0, 0},
+	.version = {1, 1, 0},
 	.module = THIS_MODULE,
 	.ctr = cache_ctr,
 	.dtr = cache_dtr,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 009339d62828..004ad1652b73 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1577,6 +1577,11 @@ static bool data_dev_supports_discard(struct pool_c *pt)
 	return q && blk_queue_discard(q);
 }
 
+static bool is_factor(sector_t block_size, uint32_t n)
+{
+	return !sector_div(block_size, n);
+}
+
 /*
  * If discard_passdown was enabled verify that the data device
  * supports discards. Disable discard_passdown if not.
@@ -1602,7 +1607,7 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
 	else if (data_limits->discard_granularity > block_size)
 		reason = "discard granularity larger than a block";
 
-	else if (block_size & (data_limits->discard_granularity - 1))
+	else if (!is_factor(block_size, data_limits->discard_granularity))
 		reason = "discard granularity not a factor of block size";
 
 	if (reason) {
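
is_factor() replaces a bitmask test that silently assumed the discard granularity was a power of two. A small userspace sketch of the difference, with made-up sizes:

#include <stdint.h>
#include <stdio.h>

static int is_factor(uint64_t block_size, uint32_t n)
{
	return !(block_size % n);	/* sector_div() remainder in the kernel */
}

int main(void)
{
	uint64_t block_size = 768;
	uint32_t granularity = 384;	/* a factor, but not a power of two */

	/* The old bitmask test wrongly reports a mismatch here... */
	printf("mask test:   %s\n",
	       (block_size & (granularity - 1)) ? "not a factor" : "factor");
	/* ...while the modulo test gets it right. */
	printf("modulo test: %s\n",
	       is_factor(block_size, granularity) ? "factor" : "not a factor");
	return 0;
}
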
@@ -2544,7 +2549,7 @@ static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 6, 1},
+	.version = {1, 7, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -2831,7 +2836,7 @@ static int thin_iterate_devices(struct dm_target *ti,
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 7, 1},
+	.version = {1, 8, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 4f06d9adf1ed..b948fd864d45 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -93,6 +93,13 @@ struct dm_verity_io {
 	 */
 };
 
+struct dm_verity_prefetch_work {
+	struct work_struct work;
+	struct dm_verity *v;
+	sector_t block;
+	unsigned n_blocks;
+};
+
 static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io)
 {
 	return (struct shash_desc *)(io + 1);
@@ -424,15 +431,18 @@ static void verity_end_io(struct bio *bio, int error)
  * The root buffer is not prefetched, it is assumed that it will be cached
  * all the time.
  */
-static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io)
+static void verity_prefetch_io(struct work_struct *work)
 {
+	struct dm_verity_prefetch_work *pw =
+		container_of(work, struct dm_verity_prefetch_work, work);
+	struct dm_verity *v = pw->v;
 	int i;
 
 	for (i = v->levels - 2; i >= 0; i--) {
 		sector_t hash_block_start;
 		sector_t hash_block_end;
-		verity_hash_at_level(v, io->block, i, &hash_block_start, NULL);
-		verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL);
+		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
+		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
 		if (!i) {
 			unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
 
@@ -452,6 +462,25 @@ no_prefetch_cluster:
 		dm_bufio_prefetch(v->bufio, hash_block_start,
 				  hash_block_end - hash_block_start + 1);
 	}
+
+	kfree(pw);
+}
+
+static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
+{
+	struct dm_verity_prefetch_work *pw;
+
+	pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
+		GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+
+	if (!pw)
+		return;
+
+	INIT_WORK(&pw->work, verity_prefetch_io);
+	pw->v = v;
+	pw->block = io->block;
+	pw->n_blocks = io->n_blocks;
+	queue_work(v->verify_wq, &pw->work);
 }
 
 /*
@@ -498,7 +527,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
 	memcpy(io->io_vec, bio_iovec(bio),
 	       io->io_vec_size * sizeof(struct bio_vec));
 
-	verity_prefetch_io(v, io);
+	verity_submit_prefetch(v, io);
 
 	generic_make_request(bio);
 
@@ -858,7 +887,7 @@ bad:
 
 static struct target_type verity_target = {
 	.name = "verity",
-	.version = {1, 1, 1},
+	.version = {1, 2, 0},
 	.module = THIS_MODULE,
 	.ctr = verity_ctr,
 	.dtr = verity_dtr,
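
The rework above moves prefetching off the map path by copying the needed parameters into a small heap-allocated work item that the worker frees when it finishes. A hedged userspace sketch of that hand-off follows, with a pthread standing in for queue_work() and all names illustrative.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct prefetch_work {		/* stands in for dm_verity_prefetch_work */
	uint64_t block;
	unsigned n_blocks;
};

/* Worker: does the (mock) prefetch, then frees the work item it owns. */
static void *prefetch_io(void *arg)
{
	struct prefetch_work *pw = arg;

	printf("prefetching %u blocks starting at block %llu\n",
	       pw->n_blocks, (unsigned long long)pw->block);
	free(pw);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct prefetch_work *pw = malloc(sizeof(*pw));

	if (!pw)	/* best effort: skipping a prefetch is harmless */
		return 0;
	pw->block = 1024;
	pw->n_blocks = 8;
	pthread_create(&t, NULL, prefetch_io, pw);	/* queue_work() analogue */
	pthread_join(t, NULL);
	return 0;
}
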
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d323676580a9..1d03ebde40b5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7654,10 +7654,8 @@ static int remove_and_add_spares(struct mddev *mddev)
 			removed++;
 		}
 	}
-	if (removed)
-		sysfs_notify(&mddev->kobj, NULL,
-			     "degraded");
-
+	if (removed && mddev->kobj.sd)
+		sysfs_notify(&mddev->kobj, NULL, "degraded");
 
 	rdev_for_each(rdev, mddev) {
 		if (rdev->raid_disk >= 0 &&
diff --git a/drivers/md/md.h b/drivers/md/md.h
index eca59c3074ef..d90fb1a879e1 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -506,7 +506,7 @@ static inline char * mdname (struct mddev * mddev)
 static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
 {
 	char nm[20];
-	if (!test_bit(Replacement, &rdev->flags)) {
+	if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
 		sprintf(nm, "rd%d", rdev->raid_disk);
 		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
 	} else
@@ -516,7 +516,7 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
 static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
 {
 	char nm[20];
-	if (!test_bit(Replacement, &rdev->flags)) {
+	if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
 		sprintf(nm, "rd%d", rdev->raid_disk);
 		sysfs_remove_link(&mddev->kobj, nm);
 	}
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index c4f28133ef82..b88757cd0d1d 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -139,15 +139,8 @@ struct child {
 	struct btree_node *n;
 };
 
-static struct dm_btree_value_type le64_type = {
-	.context = NULL,
-	.size = sizeof(__le64),
-	.inc = NULL,
-	.dec = NULL,
-	.equal = NULL
-};
-
-static int init_child(struct dm_btree_info *info, struct btree_node *parent,
+static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
+		      struct btree_node *parent,
 		      unsigned index, struct child *result)
 {
 	int r, inc;
@@ -164,7 +157,7 @@ static int init_child(struct dm_btree_info *info, struct btree_node *parent,
 	result->n = dm_block_data(result->block);
 
 	if (inc)
-		inc_children(info->tm, result->n, &le64_type);
+		inc_children(info->tm, result->n, vt);
 
 	*((__le64 *) value_ptr(parent, index)) =
 		cpu_to_le64(dm_block_location(result->block));
@@ -236,7 +229,7 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
 }
 
 static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
-		      unsigned left_index)
+		      struct dm_btree_value_type *vt, unsigned left_index)
 {
 	int r;
 	struct btree_node *parent;
@@ -244,11 +237,11 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
 
 	parent = dm_block_data(shadow_current(s));
 
-	r = init_child(info, parent, left_index, &left);
+	r = init_child(info, vt, parent, left_index, &left);
 	if (r)
 		return r;
 
-	r = init_child(info, parent, left_index + 1, &right);
+	r = init_child(info, vt, parent, left_index + 1, &right);
 	if (r) {
 		exit_child(info, &left);
 		return r;
@@ -368,7 +361,7 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
 }
 
 static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
-		      unsigned left_index)
+		      struct dm_btree_value_type *vt, unsigned left_index)
 {
 	int r;
 	struct btree_node *parent = dm_block_data(shadow_current(s));
@@ -377,17 +370,17 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
 	/*
 	 * FIXME: fill out an array?
 	 */
-	r = init_child(info, parent, left_index, &left);
+	r = init_child(info, vt, parent, left_index, &left);
 	if (r)
 		return r;
 
-	r = init_child(info, parent, left_index + 1, &center);
+	r = init_child(info, vt, parent, left_index + 1, &center);
 	if (r) {
 		exit_child(info, &left);
 		return r;
 	}
 
-	r = init_child(info, parent, left_index + 2, &right);
+	r = init_child(info, vt, parent, left_index + 2, &right);
 	if (r) {
 		exit_child(info, &left);
 		exit_child(info, &center);
@@ -434,7 +427,8 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
 }
 
 static int rebalance_children(struct shadow_spine *s,
-			      struct dm_btree_info *info, uint64_t key)
+			      struct dm_btree_info *info,
+			      struct dm_btree_value_type *vt, uint64_t key)
 {
 	int i, r, has_left_sibling, has_right_sibling;
 	uint32_t child_entries;
@@ -472,13 +466,13 @@ static int rebalance_children(struct shadow_spine *s,
 	has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
 
 	if (!has_left_sibling)
-		r = rebalance2(s, info, i);
+		r = rebalance2(s, info, vt, i);
 
 	else if (!has_right_sibling)
-		r = rebalance2(s, info, i - 1);
+		r = rebalance2(s, info, vt, i - 1);
 
 	else
-		r = rebalance3(s, info, i - 1);
+		r = rebalance3(s, info, vt, i - 1);
 
 	return r;
 }
@@ -529,7 +523,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
 		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
 			return do_leaf(n, key, index);
 
-		r = rebalance_children(s, info, key);
+		r = rebalance_children(s, info, vt, key);
 		if (r)
 			break;
 
@@ -550,6 +544,14 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
 	return r;
 }
 
+static struct dm_btree_value_type le64_type = {
+	.context = NULL,
+	.size = sizeof(__le64),
+	.inc = NULL,
+	.dec = NULL,
+	.equal = NULL
+};
+
 int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
 		    uint64_t *keys, dm_block_t *new_root)
 {
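
The point of threading vt down to init_child() is that child reference-count adjustments must use the caller's value type rather than the hard-coded le64 one, whose inc callback is NULL. A hedged, self-contained sketch of that idea in plain C; the structs and names below are stand-ins, not the kernel's.

#include <stddef.h>
#include <stdio.h>

struct value_type {			/* dm_btree_value_type analogue */
	size_t size;
	void (*inc)(void *value);	/* NULL for plain, uncounted values */
};

/* Bump refcounts on all values in a shared node, honouring the value type. */
static void inc_children(struct value_type *vt, void **values, int n)
{
	int i;

	if (!vt->inc)			/* le64_type-style passthrough */
		return;
	for (i = 0; i < n; i++)
		vt->inc(values[i]);
}

static void inc_refcounted(void *value)
{
	(*(int *)value)++;		/* e.g. bump a shared-block refcount */
}

int main(void)
{
	int a = 1, b = 5;
	void *vals[] = { &a, &b };
	struct value_type refs = { sizeof(int), inc_refcounted };

	inc_children(&refs, vals, 2);
	printf("refcounts now %d and %d\n", a, b);	/* 2 and 6 */
	return 0;
}
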
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7bbd28546214..2fefb9f2198e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -667,9 +667,11 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			bi->bi_size = STRIPE_SIZE;
 			if (rrdev)
 				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
-			trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
-					      bi, disk_devt(conf->mddev->gendisk),
-					      sh->dev[i].sector);
+
+			if (conf->mddev->gendisk)
+				trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+						      bi, disk_devt(conf->mddev->gendisk),
+						      sh->dev[i].sector);
 			generic_make_request(bi);
 		}
 		if (rrdev) {
@@ -700,9 +702,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			rbi->bi_io_vec[0].bv_offset = 0;
 			rbi->bi_size = STRIPE_SIZE;
-			trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
-					      rbi, disk_devt(conf->mddev->gendisk),
-					      sh->dev[i].sector);
+			if (conf->mddev->gendisk)
+				trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+						      rbi, disk_devt(conf->mddev->gendisk),
+						      sh->dev[i].sector);
 			generic_make_request(rbi);
 		}
 		if (!rdev && !rrdev) {
@@ -2279,17 +2282,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
 	int level = conf->level;
 
 	if (rcw) {
-		/* if we are not expanding this is a proper write request, and
-		 * there will be bios with new data to be drained into the
-		 * stripe cache
-		 */
-		if (!expand) {
-			sh->reconstruct_state = reconstruct_state_drain_run;
-			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
-		} else
-			sh->reconstruct_state = reconstruct_state_run;
-
-		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
 
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
@@ -2302,6 +2294,21 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
 				s->locked++;
 			}
 		}
+		/* if we are not expanding this is a proper write request, and
+		 * there will be bios with new data to be drained into the
+		 * stripe cache
+		 */
+		if (!expand) {
+			if (!s->locked)
+				/* False alarm, nothing to do */
+				return;
+			sh->reconstruct_state = reconstruct_state_drain_run;
+			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+		} else
+			sh->reconstruct_state = reconstruct_state_run;
+
+		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
+
 		if (s->locked + conf->max_degraded == disks)
 			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
 				atomic_inc(&conf->pending_full_writes);
@@ -2310,11 +2317,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
 		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
 			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
 
-		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
-		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
-		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
-		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
-
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
 			if (i == pd_idx)
@@ -2329,6 +2331,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
 				s->locked++;
 			}
 		}
+		if (!s->locked)
+			/* False alarm - nothing to do */
+			return;
+		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
+		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
+		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
 	}
 
 	/* keep the parity disk(s) locked while asynchronous operations
@@ -2563,6 +2572,8 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
 	int i;
 
 	clear_bit(STRIPE_SYNCING, &sh->state);
+	if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
+		wake_up(&conf->wait_for_overlap);
 	s->syncing = 0;
 	s->replacing = 0;
 	/* There is nothing more to do for sync/check/repair.
@@ -2736,6 +2747,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
 {
 	int i;
 	struct r5dev *dev;
+	int discard_pending = 0;
 
 	for (i = disks; i--; )
 		if (sh->dev[i].written) {
@@ -2764,9 +2776,23 @@ static void handle_stripe_clean_event(struct r5conf *conf,
 						STRIPE_SECTORS,
 					 !test_bit(STRIPE_DEGRADED, &sh->state),
 						0);
-			}
-		} else if (test_bit(R5_Discard, &sh->dev[i].flags))
-			clear_bit(R5_Discard, &sh->dev[i].flags);
+			} else if (test_bit(R5_Discard, &dev->flags))
+				discard_pending = 1;
+		}
+	if (!discard_pending &&
+	    test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
+		clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
+		clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
+		if (sh->qd_idx >= 0) {
+			clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
+			clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags);
+		}
+		/* now that discard is done we can proceed with any sync */
+		clear_bit(STRIPE_DISCARD, &sh->state);
+		if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
+			set_bit(STRIPE_HANDLE, &sh->state);
+
+	}
 
 	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
 		if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -2825,8 +2851,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 	set_bit(STRIPE_HANDLE, &sh->state);
 	if (rmw < rcw && rmw > 0) {
 		/* prefer read-modify-write, but need to get some data */
-		blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
-				  (unsigned long long)sh->sector, rmw);
+		if (conf->mddev->queue)
+			blk_add_trace_msg(conf->mddev->queue,
+					  "raid5 rmw %llu %d",
+					  (unsigned long long)sh->sector, rmw);
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
 			if ((dev->towrite || i == sh->pd_idx) &&
@@ -2876,7 +2904,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 			}
 		}
 	}
-	if (rcw)
+	if (rcw && conf->mddev->queue)
 		blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
 				  (unsigned long long)sh->sector,
 				  rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
@@ -3416,9 +3444,15 @@ static void handle_stripe(struct stripe_head *sh)
 		return;
 	}
 
-	if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
-		set_bit(STRIPE_SYNCING, &sh->state);
-		clear_bit(STRIPE_INSYNC, &sh->state);
+	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
+		spin_lock(&sh->stripe_lock);
+		/* Cannot process 'sync' concurrently with 'discard' */
+		if (!test_bit(STRIPE_DISCARD, &sh->state) &&
+		    test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
+			set_bit(STRIPE_SYNCING, &sh->state);
+			clear_bit(STRIPE_INSYNC, &sh->state);
+		}
+		spin_unlock(&sh->stripe_lock);
 	}
 	clear_bit(STRIPE_DELAYED, &sh->state);
 
@@ -3578,6 +3612,8 @@ static void handle_stripe(struct stripe_head *sh)
 	    test_bit(STRIPE_INSYNC, &sh->state)) {
 		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
 		clear_bit(STRIPE_SYNCING, &sh->state);
+		if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
+			wake_up(&conf->wait_for_overlap);
 	}
 
 	/* If the failed drives are just a ReadError, then we might need
@@ -3981,9 +4017,10 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		atomic_inc(&conf->active_aligned_reads);
 		spin_unlock_irq(&conf->device_lock);
 
-		trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
-				      align_bi, disk_devt(mddev->gendisk),
-				      raid_bio->bi_sector);
+		if (mddev->gendisk)
+			trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+					      align_bi, disk_devt(mddev->gendisk),
+					      raid_bio->bi_sector);
 		generic_make_request(align_bi);
 		return 1;
 	} else {
@@ -4077,7 +4114,8 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
 		}
 		spin_unlock_irq(&conf->device_lock);
 	}
-	trace_block_unplug(mddev->queue, cnt, !from_schedule);
+	if (mddev->queue)
+		trace_block_unplug(mddev->queue, cnt, !from_schedule);
 	kfree(cb);
 }
 
@@ -4140,6 +4178,13 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 		sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
 		prepare_to_wait(&conf->wait_for_overlap, &w,
 				TASK_UNINTERRUPTIBLE);
+		set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
+		if (test_bit(STRIPE_SYNCING, &sh->state)) {
+			release_stripe(sh);
+			schedule();
+			goto again;
+		}
+		clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
 		spin_lock_irq(&sh->stripe_lock);
 		for (d = 0; d < conf->raid_disks; d++) {
 			if (d == sh->pd_idx || d == sh->qd_idx)
@@ -4152,6 +4197,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 				goto again;
 			}
 		}
+		set_bit(STRIPE_DISCARD, &sh->state);
 		finish_wait(&conf->wait_for_overlap, &w);
 		for (d = 0; d < conf->raid_disks; d++) {
 			if (d == sh->pd_idx || d == sh->qd_idx)
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 18b2c4a8a1fd..b0b663b119a8 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -221,10 +221,6 @@ struct stripe_head {
 	struct stripe_operations {
 		int target, target2;
 		enum sum_check_flags zero_sum_result;
-		#ifdef CONFIG_MULTICORE_RAID456
-		unsigned long request;
-		wait_queue_head_t wait_for_ops;
-		#endif
 	} ops;
 	struct r5dev {
 		/* rreq and rvec are used for the replacement device when
@@ -323,6 +319,7 @@ enum {
 	STRIPE_COMPUTE_RUN,
 	STRIPE_OPS_REQ_PENDING,
 	STRIPE_ON_UNPLUG_LIST,
+	STRIPE_DISCARD,
 };
 
 /*
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index d4e7567b367c..0b899cb6cda1 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -724,7 +724,7 @@ static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
 	if (enable) {
 		if (is_code(code, M5MOLS_RESTYPE_MONITOR))
 			ret = m5mols_start_monitor(info);
-		if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
+		else if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
 			ret = m5mols_start_capture(info);
 		else
 			ret = -EINVAL;
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index ccd18e4ee789..54579e4c740b 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -250,17 +250,19 @@ static u8 SRAM_Table[][60] =
    vdelay         start of active video in 2 * field lines relative to
                   trailing edge of /VRESET pulse (VDELAY register).
    sheight        height of active video in 2 * field lines.
+   extraheight    Added to sheight for cropcap.bounds.height only
    videostart0    ITU-R frame line number of the line corresponding
                   to vdelay in the first field. */
 #define CROPCAP(minhdelayx1, hdelayx1, swidth, totalwidth, sqwidth, \
-		vdelay, sheight, videostart0) \
+		vdelay, sheight, extraheight, videostart0) \
 	.cropcap.bounds.left = minhdelayx1, \
 	/* * 2 because vertically we count field lines times two, */ \
 	/* e.g. 23 * 2 to 23 * 2 + 576 in PAL-BGHI defrect. */ \
 	.cropcap.bounds.top = (videostart0) * 2 - (vdelay) + MIN_VDELAY, \
 	/* 4 is a safety margin at the end of the line. */ \
 	.cropcap.bounds.width = (totalwidth) - (minhdelayx1) - 4, \
-	.cropcap.bounds.height = (sheight) + (vdelay) - MIN_VDELAY, \
+	.cropcap.bounds.height = (sheight) + (extraheight) + (vdelay) - \
+				 MIN_VDELAY, \
 	.cropcap.defrect.left = hdelayx1, \
 	.cropcap.defrect.top = (videostart0) * 2, \
 	.cropcap.defrect.width = swidth, \
@@ -301,9 +303,10 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
301 /* totalwidth */ 1135, 303 /* totalwidth */ 1135,
302 /* sqwidth */ 944, 304 /* sqwidth */ 944,
303 /* vdelay */ 0x20, 305 /* vdelay */ 0x20,
304 /* bt878 (and bt848?) can capture another 306 /* sheight */ 576,
305 line below active video. */ 307 /* bt878 (and bt848?) can capture another
306 /* sheight */ (576 + 2) + 0x20 - 2, 308 line below active video. */
309 /* extraheight */ 2,
307 /* videostart0 */ 23) 310 /* videostart0 */ 23)
308 },{ 311 },{
309 .v4l2_id = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR, 312 .v4l2_id = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR,
@@ -330,6 +333,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
330 /* sqwidth */ 780, 333 /* sqwidth */ 780,
331 /* vdelay */ 0x1a, 334 /* vdelay */ 0x1a,
332 /* sheight */ 480, 335 /* sheight */ 480,
336 /* extraheight */ 0,
333 /* videostart0 */ 23) 337 /* videostart0 */ 23)
334 },{ 338 },{
335 .v4l2_id = V4L2_STD_SECAM, 339 .v4l2_id = V4L2_STD_SECAM,
@@ -355,6 +359,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
355 /* sqwidth */ 944, 359 /* sqwidth */ 944,
356 /* vdelay */ 0x20, 360 /* vdelay */ 0x20,
357 /* sheight */ 576, 361 /* sheight */ 576,
362 /* extraheight */ 0,
358 /* videostart0 */ 23) 363 /* videostart0 */ 23)
359 },{ 364 },{
360 .v4l2_id = V4L2_STD_PAL_Nc, 365 .v4l2_id = V4L2_STD_PAL_Nc,
@@ -380,6 +385,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
380 /* sqwidth */ 780, 385 /* sqwidth */ 780,
381 /* vdelay */ 0x1a, 386 /* vdelay */ 0x1a,
382 /* sheight */ 576, 387 /* sheight */ 576,
388 /* extraheight */ 0,
383 /* videostart0 */ 23) 389 /* videostart0 */ 23)
384 },{ 390 },{
385 .v4l2_id = V4L2_STD_PAL_M, 391 .v4l2_id = V4L2_STD_PAL_M,
@@ -405,6 +411,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
405 /* sqwidth */ 780, 411 /* sqwidth */ 780,
406 /* vdelay */ 0x1a, 412 /* vdelay */ 0x1a,
407 /* sheight */ 480, 413 /* sheight */ 480,
414 /* extraheight */ 0,
408 /* videostart0 */ 23) 415 /* videostart0 */ 23)
409 },{ 416 },{
410 .v4l2_id = V4L2_STD_PAL_N, 417 .v4l2_id = V4L2_STD_PAL_N,
@@ -430,6 +437,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
430 /* sqwidth */ 944, 437 /* sqwidth */ 944,
431 /* vdelay */ 0x20, 438 /* vdelay */ 0x20,
432 /* sheight */ 576, 439 /* sheight */ 576,
440 /* extraheight */ 0,
433 /* videostart0 */ 23) 441 /* videostart0 */ 23)
434 },{ 442 },{
435 .v4l2_id = V4L2_STD_NTSC_M_JP, 443 .v4l2_id = V4L2_STD_NTSC_M_JP,
@@ -455,6 +463,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
455 /* sqwidth */ 780, 463 /* sqwidth */ 780,
456 /* vdelay */ 0x16, 464 /* vdelay */ 0x16,
457 /* sheight */ 480, 465 /* sheight */ 480,
466 /* extraheight */ 0,
458 /* videostart0 */ 23) 467 /* videostart0 */ 23)
459 },{ 468 },{
460 /* that one hopefully works with the strange timing 469 /* that one hopefully works with the strange timing
@@ -484,6 +493,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
484 /* sqwidth */ 944, 493 /* sqwidth */ 944,
485 /* vdelay */ 0x1a, 494 /* vdelay */ 0x1a,
486 /* sheight */ 480, 495 /* sheight */ 480,
496 /* extraheight */ 0,
487 /* videostart0 */ 23) 497 /* videostart0 */ 23)
488 } 498 }
489}; 499};
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 82d9f6ac12f3..33b5ffc8d66d 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -1054,16 +1054,18 @@ static int gsc_m2m_suspend(struct gsc_dev *gsc)
1054 1054
1055static int gsc_m2m_resume(struct gsc_dev *gsc) 1055static int gsc_m2m_resume(struct gsc_dev *gsc)
1056{ 1056{
1057 struct gsc_ctx *ctx;
1057 unsigned long flags; 1058 unsigned long flags;
1058 1059
1059 spin_lock_irqsave(&gsc->slock, flags); 1060 spin_lock_irqsave(&gsc->slock, flags);
1060 /* Clear for full H/W setup in first run after resume */ 1061 /* Clear for full H/W setup in first run after resume */
1062 ctx = gsc->m2m.ctx;
1061 gsc->m2m.ctx = NULL; 1063 gsc->m2m.ctx = NULL;
1062 spin_unlock_irqrestore(&gsc->slock, flags); 1064 spin_unlock_irqrestore(&gsc->slock, flags);
1063 1065
1064 if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state)) 1066 if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state))
1065 gsc_m2m_job_finish(gsc->m2m.ctx, 1067 gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
1066 VB2_BUF_STATE_ERROR); 1068
1067 return 0; 1069 return 0;
1068} 1070}
1069 1071
@@ -1204,7 +1206,7 @@ static int gsc_resume(struct device *dev)
1204 /* Do not resume if the device was idle before system suspend */ 1206 /* Do not resume if the device was idle before system suspend */
1205 spin_lock_irqsave(&gsc->slock, flags); 1207 spin_lock_irqsave(&gsc->slock, flags);
1206 if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) || 1208 if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) ||
1207 !gsc_m2m_active(gsc)) { 1209 !gsc_m2m_opened(gsc)) {
1208 spin_unlock_irqrestore(&gsc->slock, flags); 1210 spin_unlock_irqrestore(&gsc->slock, flags);
1209 return 0; 1211 return 0;
1210 } 1212 }
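
The gsc_m2m_resume hunk fixes a classic ordering bug: the old code cleared gsc->m2m.ctx under the spinlock and then passed the now-NULL field to gsc_m2m_job_finish(). The fix snapshots the pointer while the lock is held, clears the shared field, and uses the snapshot afterwards. A self-contained pthread analogue of that pattern (all identifiers invented):

#include <pthread.h>
#include <stdio.h>

struct ctx { int id; };

static pthread_mutex_t slock = PTHREAD_MUTEX_INITIALIZER;
static struct ctx *shared_ctx;

static void job_finish(struct ctx *c)
{
        printf("finishing ctx %d\n", c ? c->id : -1);
}

static void resume(void)
{
        struct ctx *c;

        pthread_mutex_lock(&slock);
        c = shared_ctx;         /* snapshot before clearing ... */
        shared_ctx = NULL;      /* ... so the field resets for full re-setup */
        pthread_mutex_unlock(&slock);

        if (c)
                job_finish(c);  /* the buggy version passed shared_ctx: now NULL */
}

int main(void)
{
        struct ctx c = { .id = 7 };
        shared_ctx = &c;
        resume();
        return 0;
}

The identical snapshot-then-clear fix appears in the s5p-fimc hunk below.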
diff --git a/drivers/media/platform/s5p-fimc/fimc-core.c b/drivers/media/platform/s5p-fimc/fimc-core.c
index e3916bde45cf..0f513dd19f86 100644
--- a/drivers/media/platform/s5p-fimc/fimc-core.c
+++ b/drivers/media/platform/s5p-fimc/fimc-core.c
@@ -850,16 +850,18 @@ static int fimc_m2m_suspend(struct fimc_dev *fimc)
850 850
851static int fimc_m2m_resume(struct fimc_dev *fimc) 851static int fimc_m2m_resume(struct fimc_dev *fimc)
852{ 852{
853 struct fimc_ctx *ctx;
853 unsigned long flags; 854 unsigned long flags;
854 855
855 spin_lock_irqsave(&fimc->slock, flags); 856 spin_lock_irqsave(&fimc->slock, flags);
856 /* Clear for full H/W setup in first run after resume */ 857 /* Clear for full H/W setup in first run after resume */
858 ctx = fimc->m2m.ctx;
857 fimc->m2m.ctx = NULL; 859 fimc->m2m.ctx = NULL;
858 spin_unlock_irqrestore(&fimc->slock, flags); 860 spin_unlock_irqrestore(&fimc->slock, flags);
859 861
860 if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state)) 862 if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state))
861 fimc_m2m_job_finish(fimc->m2m.ctx, 863 fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
862 VB2_BUF_STATE_ERROR); 864
863 return 0; 865 return 0;
864} 866}
865 867
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite-reg.c b/drivers/media/platform/s5p-fimc/fimc-lite-reg.c
index f0af0754a7b4..ac9663ce2a49 100644
--- a/drivers/media/platform/s5p-fimc/fimc-lite-reg.c
+++ b/drivers/media/platform/s5p-fimc/fimc-lite-reg.c
@@ -128,10 +128,10 @@ static const u32 src_pixfmt_map[8][3] = {
128void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f) 128void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
129{ 129{
130 enum v4l2_mbus_pixelcode pixelcode = dev->fmt->mbus_code; 130 enum v4l2_mbus_pixelcode pixelcode = dev->fmt->mbus_code;
131 unsigned int i = ARRAY_SIZE(src_pixfmt_map); 131 int i = ARRAY_SIZE(src_pixfmt_map);
132 u32 cfg; 132 u32 cfg;
133 133
134 while (i-- >= 0) { 134 while (--i >= 0) {
135 if (src_pixfmt_map[i][0] == pixelcode) 135 if (src_pixfmt_map[i][0] == pixelcode)
136 break; 136 break;
137 } 137 }
@@ -224,9 +224,9 @@ static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
224 { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY }, 224 { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY },
225 }; 225 };
226 u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT); 226 u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
227 unsigned int i = ARRAY_SIZE(pixcode); 227 int i = ARRAY_SIZE(pixcode);
228 228
229 while (i-- >= 0) 229 while (--i >= 0)
230 if (pixcode[i][0] == dev->fmt->mbus_code) 230 if (pixcode[i][0] == dev->fmt->mbus_code)
231 break; 231 break;
232 cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK; 232 cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK;
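
Both fimc-lite-reg.c hunks fix the same countdown bug: with an unsigned counter, i-- >= 0 is always true, so the loop never terminates and indexes the table far out of bounds once i wraps from 0 to UINT_MAX. The fix uses a signed counter with pre-decrement in the test, which visits exactly the indices N-1..0. A small demo of both shapes (the break cap is only there so the buggy loop halts):

#include <stdio.h>

int main(void)
{
        static const int map[3] = { 10, 20, 30 };

        /* Buggy shape: an unsigned value is never negative, so the
         * condition is always true and i wraps past zero. */
        unsigned int u = 3, steps = 0;
        while (u-- >= 0) {
                if (++steps > 6)
                        break;  /* would otherwise loop "forever" */
        }
        printf("unsigned loop ran %u times for a 3-entry table\n", steps);

        /* Fixed shape: signed counter, pre-decrement in the test, so
         * the visited indices are exactly 2, 1, 0, then the loop ends. */
        int i = 3;
        while (--i >= 0)
                printf("map[%d] = %d\n", i, map[i]);
        return 0;
}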
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite.c b/drivers/media/platform/s5p-fimc/fimc-lite.c
index bfc4206935c8..bbc35de7db27 100644
--- a/drivers/media/platform/s5p-fimc/fimc-lite.c
+++ b/drivers/media/platform/s5p-fimc/fimc-lite.c
@@ -1408,6 +1408,7 @@ static const struct v4l2_ctrl_config fimc_lite_ctrl = {
1408 .id = V4L2_CTRL_CLASS_USER | 0x1001, 1408 .id = V4L2_CTRL_CLASS_USER | 0x1001,
1409 .type = V4L2_CTRL_TYPE_BOOLEAN, 1409 .type = V4L2_CTRL_TYPE_BOOLEAN,
1410 .name = "Test Pattern 640x480", 1410 .name = "Test Pattern 640x480",
1411 .step = 1,
1411}; 1412};
1412 1413
1413static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc) 1414static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc)
diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.c b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
index a17fcb2d5d41..cd38d708ab58 100644
--- a/drivers/media/platform/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
@@ -827,7 +827,7 @@ static int fimc_md_link_notify(struct media_pad *source,
827 struct fimc_pipeline *pipeline; 827 struct fimc_pipeline *pipeline;
828 struct v4l2_subdev *sd; 828 struct v4l2_subdev *sd;
829 struct mutex *lock; 829 struct mutex *lock;
830 int ret = 0; 830 int i, ret = 0;
831 int ref_count; 831 int ref_count;
832 832
833 if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV) 833 if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
@@ -854,29 +854,28 @@ static int fimc_md_link_notify(struct media_pad *source,
854 return 0; 854 return 0;
855 } 855 }
856 856
857 mutex_lock(lock);
858 ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count;
859
857 if (!(flags & MEDIA_LNK_FL_ENABLED)) { 860 if (!(flags & MEDIA_LNK_FL_ENABLED)) {
858 int i; 861 if (ref_count > 0) {
859 mutex_lock(lock); 862 ret = __fimc_pipeline_close(pipeline);
860 ret = __fimc_pipeline_close(pipeline); 863 if (!ret && fimc)
864 fimc_ctrls_delete(fimc->vid_cap.ctx);
865 }
861 for (i = 0; i < IDX_MAX; i++) 866 for (i = 0; i < IDX_MAX; i++)
862 pipeline->subdevs[i] = NULL; 867 pipeline->subdevs[i] = NULL;
863 if (fimc) 868 } else if (ref_count > 0) {
864 fimc_ctrls_delete(fimc->vid_cap.ctx); 869 /*
865 mutex_unlock(lock); 870 * Link activation. Enable power of pipeline elements only if
866 return ret; 871 * the pipeline is already in use, i.e. its video node is open.
872 * Recreate the controls destroyed during the link deactivation.
873 */
874 ret = __fimc_pipeline_open(pipeline,
875 source->entity, true);
876 if (!ret && fimc)
877 ret = fimc_capture_ctrls_create(fimc);
867 } 878 }
868 /*
869 * Link activation. Enable power of pipeline elements only if the
870 * pipeline is already in use, i.e. its video node is opened.
871 * Recreate the controls destroyed during the link deactivation.
872 */
873 mutex_lock(lock);
874
875 ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count;
876 if (ref_count > 0)
877 ret = __fimc_pipeline_open(pipeline, source->entity, true);
878 if (!ret && fimc)
879 ret = fimc_capture_ctrls_create(fimc);
880 879
881 mutex_unlock(lock); 880 mutex_unlock(lock);
882 return ret ? -EPIPE : ret; 881 return ret ? -EPIPE : ret;
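
The fimc-mdevice.c rework hoists the lock acquisition and the ref_count read above the enable/disable split, so the function holds the mutex exactly once and both arms see the same reference count. A self-contained sketch of that control-flow shape (names and stub bodies invented; the real code also clears pipeline->subdevs and handles controls):

#include <pthread.h>
#include <stdio.h>

#define LNK_ENABLED 0x1

struct pipe_dev {
        pthread_mutex_t lock;
        int ref_count;          /* "video node open" count */
};

static int pipeline_close(void) { puts("pipeline close"); return 0; }
static int pipeline_open(void)  { puts("pipeline open");  return 0; }

/* One lock section, one ref_count read, enable/disable as the two
 * arms of a single branch. */
static int link_notify(struct pipe_dev *d, unsigned int flags)
{
        int ret = 0;

        pthread_mutex_lock(&d->lock);
        if (!(flags & LNK_ENABLED)) {
                if (d->ref_count > 0)
                        ret = pipeline_close();
        } else if (d->ref_count > 0) {
                ret = pipeline_open();
        }
        pthread_mutex_unlock(&d->lock);

        return ret ? -32 /* -EPIPE */ : 0;
}

int main(void)
{
        struct pipe_dev d = { PTHREAD_MUTEX_INITIALIZER, 1 };
        link_notify(&d, 0);
        link_notify(&d, LNK_ENABLED);
        return 0;
}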
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index e84703c314ce..1cb6d57987c6 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -276,7 +276,7 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
276 unsigned int frame_type; 276 unsigned int frame_type;
277 277
278 dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev); 278 dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
279 frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev); 279 frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_disp_frame_type, ctx);
280 280
281 /* If frame is same as previous then skip and do not dequeue */ 281 /* If frame is same as previous then skip and do not dequeue */
282 if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) { 282 if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 2356fd52a169..4f6b553c4b2d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -232,6 +232,7 @@ static struct mfc_control controls[] = {
232 .minimum = 0, 232 .minimum = 0,
233 .maximum = 1, 233 .maximum = 1,
234 .default_value = 0, 234 .default_value = 0,
235 .step = 1,
235 .menu_skip_mask = 0, 236 .menu_skip_mask = 0,
236 }, 237 },
237 { 238 {
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 19f3563c61da..5a79c333d45e 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -291,7 +291,7 @@ config IR_TTUSBIR
291 291
292config IR_RX51 292config IR_RX51
293 tristate "Nokia N900 IR transmitter diode" 293 tristate "Nokia N900 IR transmitter diode"
294 depends on OMAP_DM_TIMER && LIRC && !ARCH_MULTIPLATFORM 294 depends on OMAP_DM_TIMER && ARCH_OMAP2PLUS && LIRC && !ARCH_MULTIPLATFORM
295 ---help--- 295 ---help---
296 Say Y or M here if you want to enable support for the IR 296 Say Y or M here if you want to enable support for the IR
297 transmitter diode built in the Nokia N900 (RX51) device. 297 transmitter diode built in the Nokia N900 (RX51) device.
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index a9d355230e8e..768aaf62d5dc 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -10,7 +10,7 @@ ifeq ($(CONFIG_COMPAT),y)
10 videodev-objs += v4l2-compat-ioctl32.o 10 videodev-objs += v4l2-compat-ioctl32.o
11endif 11endif
12 12
13obj-$(CONFIG_VIDEO_DEV) += videodev.o 13obj-$(CONFIG_VIDEO_V4L2) += videodev.o
14obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o 14obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
15obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o 15obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
16 16
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 45ea7185c003..642c6223fa6c 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -152,6 +152,20 @@ static void mei_me_intr_disable(struct mei_device *dev)
152} 152}
153 153
154/** 154/**
155 * mei_me_hw_reset_release - release device from the reset
156 *
157 * @dev: the device structure
158 */
159static void mei_me_hw_reset_release(struct mei_device *dev)
160{
161 struct mei_me_hw *hw = to_me_hw(dev);
162 u32 hcsr = mei_hcsr_read(hw);
163
164 hcsr |= H_IG;
165 hcsr &= ~H_RST;
166 mei_hcsr_set(hw, hcsr);
167}
168/**
155 * mei_me_hw_reset - resets fw via mei csr register. 169 * mei_me_hw_reset - resets fw via mei csr register.
156 * 170 *
157 * @dev: the device structure 171 * @dev: the device structure
@@ -169,18 +183,14 @@ static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
169 if (intr_enable) 183 if (intr_enable)
170 hcsr |= H_IE; 184 hcsr |= H_IE;
171 else 185 else
172 hcsr &= ~H_IE; 186 hcsr |= ~H_IE;
173
174 mei_hcsr_set(hw, hcsr);
175
176 hcsr = mei_hcsr_read(hw) | H_IG;
177 hcsr &= ~H_RST;
178 187
179 mei_hcsr_set(hw, hcsr); 188 mei_hcsr_set(hw, hcsr);
180 189
181 hcsr = mei_hcsr_read(hw); 190 if (dev->dev_state == MEI_DEV_POWER_DOWN)
191 mei_me_hw_reset_release(dev);
182 192
183 dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr); 193 dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", mei_hcsr_read(hw));
184} 194}
185 195
186/** 196/**
@@ -466,7 +476,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
466 mutex_unlock(&dev->device_lock); 476 mutex_unlock(&dev->device_lock);
467 return IRQ_HANDLED; 477 return IRQ_HANDLED;
468 } else { 478 } else {
469 dev_dbg(&dev->pdev->dev, "FW not ready.\n"); 479 dev_dbg(&dev->pdev->dev, "Reset Completed.\n");
480 mei_me_hw_reset_release(dev);
470 mutex_unlock(&dev->device_lock); 481 mutex_unlock(&dev->device_lock);
471 return IRQ_HANDLED; 482 return IRQ_HANDLED;
472 } 483 }
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 6ec530168afb..356179991a2e 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -183,6 +183,24 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
183 mei_cl_all_write_clear(dev); 183 mei_cl_all_write_clear(dev);
184} 184}
185 185
186void mei_stop(struct mei_device *dev)
187{
188 dev_dbg(&dev->pdev->dev, "stopping the device.\n");
189
190 mutex_lock(&dev->device_lock);
191
192 cancel_delayed_work(&dev->timer_work);
193
194 mei_wd_stop(dev);
195
196 dev->dev_state = MEI_DEV_POWER_DOWN;
197 mei_reset(dev, 0);
198
199 mutex_unlock(&dev->device_lock);
200
201 flush_scheduled_work();
202}
203
186 204
187 205
188 206
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index cb80166161f0..97873812e33b 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -381,6 +381,7 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
381void mei_device_init(struct mei_device *dev); 381void mei_device_init(struct mei_device *dev);
382void mei_reset(struct mei_device *dev, int interrupts); 382void mei_reset(struct mei_device *dev, int interrupts);
383int mei_hw_init(struct mei_device *dev); 383int mei_hw_init(struct mei_device *dev);
384void mei_stop(struct mei_device *dev);
384 385
385/* 386/*
386 * MEI interrupt functions prototype 387 * MEI interrupt functions prototype
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index b40ec0601ab0..b8b5c9c3ad03 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -247,44 +247,14 @@ static void mei_remove(struct pci_dev *pdev)
247 247
248 hw = to_me_hw(dev); 248 hw = to_me_hw(dev);
249 249
250 mutex_lock(&dev->device_lock);
251
252 cancel_delayed_work(&dev->timer_work);
253 250
254 mei_wd_stop(dev); 251 dev_err(&pdev->dev, "stop\n");
252 mei_stop(dev);
255 253
256 mei_pdev = NULL; 254 mei_pdev = NULL;
257 255
258 if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
259 dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
260 mei_cl_disconnect(&dev->iamthif_cl);
261 }
262 if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
263 dev->wd_cl.state = MEI_FILE_DISCONNECTING;
264 mei_cl_disconnect(&dev->wd_cl);
265 }
266
267 /* Unregistering watchdog device */
268 mei_watchdog_unregister(dev); 256 mei_watchdog_unregister(dev);
269 257
270 /* remove entry if already in list */
271 dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
272
273 if (dev->open_handle_count > 0)
274 dev->open_handle_count--;
275 mei_cl_unlink(&dev->wd_cl);
276
277 if (dev->open_handle_count > 0)
278 dev->open_handle_count--;
279 mei_cl_unlink(&dev->iamthif_cl);
280
281 dev->iamthif_current_cb = NULL;
282 dev->me_clients_num = 0;
283
284 mutex_unlock(&dev->device_lock);
285
286 flush_scheduled_work();
287
288 /* disable interrupts */ 258 /* disable interrupts */
289 mei_disable_interrupts(dev); 259 mei_disable_interrupts(dev);
290 260
@@ -308,28 +278,20 @@ static int mei_pci_suspend(struct device *device)
308{ 278{
309 struct pci_dev *pdev = to_pci_dev(device); 279 struct pci_dev *pdev = to_pci_dev(device);
310 struct mei_device *dev = pci_get_drvdata(pdev); 280 struct mei_device *dev = pci_get_drvdata(pdev);
311 int err;
312 281
313 if (!dev) 282 if (!dev)
314 return -ENODEV; 283 return -ENODEV;
315 mutex_lock(&dev->device_lock);
316 284
317 cancel_delayed_work(&dev->timer_work); 285 dev_err(&pdev->dev, "suspend\n");
318 286
319 /* Stop watchdog if exists */ 287 mei_stop(dev);
320 err = mei_wd_stop(dev); 288
321 /* Set new mei state */ 289 mei_disable_interrupts(dev);
322 if (dev->dev_state == MEI_DEV_ENABLED ||
323 dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
324 dev->dev_state = MEI_DEV_POWER_DOWN;
325 mei_reset(dev, 0);
326 }
327 mutex_unlock(&dev->device_lock);
328 290
329 free_irq(pdev->irq, dev); 291 free_irq(pdev->irq, dev);
330 pci_disable_msi(pdev); 292 pci_disable_msi(pdev);
331 293
332 return err; 294 return 0;
333} 295}
334 296
335static int mei_pci_resume(struct device *device) 297static int mei_pci_resume(struct device *device)
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
index ed5c433cd493..f3cdd904fe4d 100644
--- a/drivers/misc/vmw_vmci/vmci_datagram.c
+++ b/drivers/misc/vmw_vmci/vmci_datagram.c
@@ -42,9 +42,11 @@ struct datagram_entry {
42 42
43struct delayed_datagram_info { 43struct delayed_datagram_info {
44 struct datagram_entry *entry; 44 struct datagram_entry *entry;
45 struct vmci_datagram msg;
46 struct work_struct work; 45 struct work_struct work;
47 bool in_dg_host_queue; 46 bool in_dg_host_queue;
47 /* msg and msg_payload must be together. */
48 struct vmci_datagram msg;
49 u8 msg_payload[];
48}; 50};
49 51
50/* Number of in-flight host->host datagrams */ 52/* Number of in-flight host->host datagrams */
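
The vmci_datagram layout change is about where a memcpy of "header plus payload" lands. The datagram header must be the last fixed member, immediately followed by a flexible array, so copying a message past the header can never trample the fields (work, in_dg_host_queue) that used to sit after it. A runnable userspace model of the fixed layout (the struct fields are simplified stand-ins, not the real vmci_datagram):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct datagram { unsigned long dst, src, payload_size; };

/* Fixed layout: the header that gets copied together with its payload
 * sits last, followed by a flexible array member. */
struct delayed_info {
        void *entry;
        int   in_host_queue;
        struct datagram msg;            /* msg and msg_payload must be together */
        unsigned char msg_payload[];
};

int main(void)
{
        struct datagram dg = { 1, 2, 5 };
        const char payload[5] = "abcd";

        struct delayed_info *di = malloc(sizeof(*di) + dg.payload_size);
        di->in_host_queue = 1;

        /* Header and payload land in msg + msg_payload, nothing else. */
        memcpy(&di->msg, &dg, sizeof(dg));
        memcpy(di->msg_payload, payload, dg.payload_size);

        printf("in_host_queue still %d, payload %.4s\n",
               di->in_host_queue, di->msg_payload);
        free(di);
        return 0;
}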
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 63feb75cc8e0..9279a9174f84 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -19,6 +19,12 @@
19/* 10 parts were found on sflash on Netgear WNDR4500 */ 19/* 10 parts were found on sflash on Netgear WNDR4500 */
20#define BCM47XXPART_MAX_PARTS 12 20#define BCM47XXPART_MAX_PARTS 12
21 21
22/*
23 * Amount of bytes we read when analyzing each block of flash memory.
24 * Set it big enough to allow detecting partition and reading important data.
25 */
26#define BCM47XXPART_BYTES_TO_READ 0x404
27
22/* Magics */ 28/* Magics */
23#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */ 29#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
24#define POT_MAGIC1 0x54544f50 /* POTT */ 30#define POT_MAGIC1 0x54544f50 /* POTT */
@@ -57,17 +63,15 @@ static int bcm47xxpart_parse(struct mtd_info *master,
57 struct trx_header *trx; 63 struct trx_header *trx;
58 int trx_part = -1; 64 int trx_part = -1;
59 int last_trx_part = -1; 65 int last_trx_part = -1;
60 int max_bytes_to_read = 0x8004; 66 int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
61 67
62 if (blocksize <= 0x10000) 68 if (blocksize <= 0x10000)
63 blocksize = 0x10000; 69 blocksize = 0x10000;
64 if (blocksize == 0x20000)
65 max_bytes_to_read = 0x18004;
66 70
67 /* Alloc */ 71 /* Alloc */
68 parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS, 72 parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
69 GFP_KERNEL); 73 GFP_KERNEL);
70 buf = kzalloc(max_bytes_to_read, GFP_KERNEL); 74 buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
71 75
72 /* Parse block by block looking for magics */ 76 /* Parse block by block looking for magics */
73 for (offset = 0; offset <= master->size - blocksize; 77 for (offset = 0; offset <= master->size - blocksize;
@@ -82,7 +86,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
82 } 86 }
83 87
84 /* Read beginning of the block */ 88 /* Read beginning of the block */
85 if (mtd_read(master, offset, max_bytes_to_read, 89 if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
86 &bytes_read, (uint8_t *)buf) < 0) { 90 &bytes_read, (uint8_t *)buf) < 0) {
87 pr_err("mtd_read error while parsing (offset: 0x%X)!\n", 91 pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
88 offset); 92 offset);
@@ -96,20 +100,6 @@ static int bcm47xxpart_parse(struct mtd_info *master,
96 continue; 100 continue;
97 } 101 }
98 102
99 /* Standard NVRAM */
100 if (buf[0x000 / 4] == NVRAM_HEADER ||
101 buf[0x1000 / 4] == NVRAM_HEADER ||
102 buf[0x8000 / 4] == NVRAM_HEADER ||
103 (blocksize == 0x20000 && (
104 buf[0x10000 / 4] == NVRAM_HEADER ||
105 buf[0x11000 / 4] == NVRAM_HEADER ||
106 buf[0x18000 / 4] == NVRAM_HEADER))) {
107 bcm47xxpart_add_part(&parts[curr_part++], "nvram",
108 offset, 0);
109 offset = rounddown(offset, blocksize);
110 continue;
111 }
112
113 /* 103 /*
114 * board_data starts with board_id which differs across boards, 104 * board_data starts with board_id which differs across boards,
115 * but we can use 'MPFR' (hopefully) magic at 0x100 105 * but we can use 'MPFR' (hopefully) magic at 0x100
@@ -178,6 +168,30 @@ static int bcm47xxpart_parse(struct mtd_info *master,
178 continue; 168 continue;
179 } 169 }
180 } 170 }
171
172 /* Look for NVRAM at the end of the last block. */
173 for (i = 0; i < ARRAY_SIZE(possible_nvram_sizes); i++) {
174 if (curr_part > BCM47XXPART_MAX_PARTS) {
175 pr_warn("Reached maximum number of partitions, scanning stopped!\n");
176 break;
177 }
178
179 offset = master->size - possible_nvram_sizes[i];
180 if (mtd_read(master, offset, 0x4, &bytes_read,
181 (uint8_t *)buf) < 0) {
182 pr_err("mtd_read error while reading at offset 0x%X!\n",
183 offset);
184 continue;
185 }
186
187 /* Standard NVRAM */
188 if (buf[0] == NVRAM_HEADER) {
189 bcm47xxpart_add_part(&parts[curr_part++], "nvram",
190 master->size - blocksize, 0);
191 break;
192 }
193 }
194
181 kfree(buf); 195 kfree(buf);
182 196
183 /* 197 /*
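
The bcm47xxpart rework replaces scanning several in-block offsets for the NVRAM header with probing a few fixed distances from the end of the flash, reading only 4 bytes per candidate. A userspace sketch of that probe loop over an in-memory image (pci-free stand-in for mtd_read; 0x48534C46 is the driver's "FLSH" NVRAM_HEADER magic):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NVRAM_MAGIC 0x48534C46  /* "FLSH" */

/* Probe fixed candidate sizes from the end of the image for the
 * NVRAM header, returning its offset or -1. */
static long find_nvram(const uint8_t *flash, size_t size)
{
        static const size_t candidates[] = { 0x8000, 0xF000, 0x10000 };
        for (size_t i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
                if (candidates[i] > size)
                        continue;
                size_t off = size - candidates[i];
                uint32_t magic;
                memcpy(&magic, flash + off, sizeof(magic));
                if (magic == NVRAM_MAGIC)
                        return (long)off;
        }
        return -1;
}

int main(void)
{
        static uint8_t flash[0x20000];
        uint32_t magic = NVRAM_MAGIC;
        memcpy(flash + sizeof(flash) - 0x10000, &magic, sizeof(magic));
        printf("nvram at %#lx\n", find_nvram(flash, sizeof(flash)));
        return 0;
}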
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 43214151b882..42c63927609d 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1523,6 +1523,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1523 oobreadlen -= toread; 1523 oobreadlen -= toread;
1524 } 1524 }
1525 } 1525 }
1526
1527 if (chip->options & NAND_NEED_READRDY) {
1528 /* Apply delay or wait for ready/busy pin */
1529 if (!chip->dev_ready)
1530 udelay(chip->chip_delay);
1531 else
1532 nand_wait_ready(mtd);
1533 }
1526 } else { 1534 } else {
1527 memcpy(buf, chip->buffers->databuf + col, bytes); 1535 memcpy(buf, chip->buffers->databuf + col, bytes);
1528 buf += bytes; 1536 buf += bytes;
@@ -1787,6 +1795,14 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1787 len = min(len, readlen); 1795 len = min(len, readlen);
1788 buf = nand_transfer_oob(chip, buf, ops, len); 1796 buf = nand_transfer_oob(chip, buf, ops, len);
1789 1797
1798 if (chip->options & NAND_NEED_READRDY) {
1799 /* Apply delay or wait for ready/busy pin */
1800 if (!chip->dev_ready)
1801 udelay(chip->chip_delay);
1802 else
1803 nand_wait_ready(mtd);
1804 }
1805
1790 readlen -= len; 1806 readlen -= len;
1791 if (!readlen) 1807 if (!readlen)
1792 break; 1808 break;
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index e3aa2748a6e7..9c612388e5de 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -22,49 +22,51 @@
22* 512 512 Byte page size 22* 512 512 Byte page size
23*/ 23*/
24struct nand_flash_dev nand_flash_ids[] = { 24struct nand_flash_dev nand_flash_ids[] = {
25#define SP_OPTIONS NAND_NEED_READRDY
26#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
25 27
26#ifdef CONFIG_MTD_NAND_MUSEUM_IDS 28#ifdef CONFIG_MTD_NAND_MUSEUM_IDS
27 {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0}, 29 {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, SP_OPTIONS},
28 {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0}, 30 {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, SP_OPTIONS},
29 {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0}, 31 {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, SP_OPTIONS},
30 {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0}, 32 {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, SP_OPTIONS},
31 {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0}, 33 {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, SP_OPTIONS},
32 {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0}, 34 {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, SP_OPTIONS},
33 {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0}, 35 {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, SP_OPTIONS},
34 {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0}, 36 {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, SP_OPTIONS},
35 {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0}, 37 {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, SP_OPTIONS},
36 {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0}, 38 {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, SP_OPTIONS},
37 39
38 {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0}, 40 {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, SP_OPTIONS},
39 {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0}, 41 {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, SP_OPTIONS},
40 {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16}, 42 {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, SP_OPTIONS16},
41 {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16}, 43 {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, SP_OPTIONS16},
42#endif 44#endif
43 45
44 {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0}, 46 {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, SP_OPTIONS},
45 {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0}, 47 {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, SP_OPTIONS},
46 {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16}, 48 {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, SP_OPTIONS16},
47 {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16}, 49 {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, SP_OPTIONS16},
48 50
49 {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0}, 51 {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, SP_OPTIONS},
50 {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0}, 52 {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, SP_OPTIONS},
51 {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16}, 53 {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, SP_OPTIONS16},
52 {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16}, 54 {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, SP_OPTIONS16},
53 55
54 {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0}, 56 {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, SP_OPTIONS},
55 {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0}, 57 {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, SP_OPTIONS},
56 {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16}, 58 {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, SP_OPTIONS16},
57 {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16}, 59 {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, SP_OPTIONS16},
58 60
59 {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0}, 61 {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, SP_OPTIONS},
60 {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0}, 62 {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, SP_OPTIONS},
61 {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0}, 63 {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, SP_OPTIONS},
62 {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16}, 64 {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, SP_OPTIONS16},
63 {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16}, 65 {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, SP_OPTIONS16},
64 {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16}, 66 {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, SP_OPTIONS16},
65 {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16}, 67 {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, SP_OPTIONS16},
66 68
67 {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0}, 69 {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, SP_OPTIONS},
68 70
69 /* 71 /*
70 * These are the new chips with large page size. The pagesize and the 72 * These are the new chips with large page size. The pagesize and the
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8b4e96e01d6c..6bbd90e1123c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1746,6 +1746,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1746 1746
1747 bond_compute_features(bond); 1747 bond_compute_features(bond);
1748 1748
1749 bond_update_speed_duplex(new_slave);
1750
1749 read_lock(&bond->lock); 1751 read_lock(&bond->lock);
1750 1752
1751 new_slave->last_arp_rx = jiffies - 1753 new_slave->last_arp_rx = jiffies -
@@ -1798,8 +1800,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1798 new_slave->link == BOND_LINK_DOWN ? "DOWN" : 1800 new_slave->link == BOND_LINK_DOWN ? "DOWN" :
1799 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK")); 1801 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
1800 1802
1801 bond_update_speed_duplex(new_slave);
1802
1803 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { 1803 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
1804 /* if there is a primary slave, remember it */ 1804 /* if there is a primary slave, remember it */
1805 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { 1805 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
@@ -2374,8 +2374,6 @@ static void bond_miimon_commit(struct bonding *bond)
2374 bond_set_backup_slave(slave); 2374 bond_set_backup_slave(slave);
2375 } 2375 }
2376 2376
2377 bond_update_speed_duplex(slave);
2378
2379 pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", 2377 pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
2380 bond->dev->name, slave->dev->name, 2378 bond->dev->name, slave->dev->name,
2381 slave->speed, slave->duplex ? "full" : "half"); 2379 slave->speed, slave->duplex ? "full" : "half");
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 1c9e09fbdff8..db103e03ba05 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -183,6 +183,11 @@ int bond_create_slave_symlinks(struct net_device *master,
183 sprintf(linkname, "slave_%s", slave->name); 183 sprintf(linkname, "slave_%s", slave->name);
184 ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj), 184 ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
185 linkname); 185 linkname);
186
187 /* free the master link created earlier in case of error */
188 if (ret)
189 sysfs_remove_link(&(slave->dev.kobj), "master");
190
186 return ret; 191 return ret;
187 192
188} 193}
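
The bond_sysfs hunk adds error unwinding: if the second symlink fails, the "master" link created just before it is removed, so no half-built sysfs state leaks to the caller. A tiny demo of the two-step setup-with-unwind pattern (stub functions invented):

#include <stdio.h>

static int create_link(const char *name)  { printf("link %s\n", name); return 0; }
static void remove_link(const char *name) { printf("unlink %s\n", name); }

/* Two-step setup: if step 2 fails, step 1 is undone before the error
 * is returned. */
static int create_pair(int fail_second)
{
        int ret = create_link("master");
        if (ret)
                return ret;

        ret = fail_second ? -1 : create_link("slave_eth0");
        if (ret)
                remove_link("master");  /* the added cleanup */
        return ret;
}

int main(void)
{
        create_pair(1);
        return 0;
}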
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a923bc4d5a1f..4046f97378c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2760,6 +2760,7 @@ load_error2:
2760 bp->port.pmf = 0; 2760 bp->port.pmf = 0;
2761load_error1: 2761load_error1:
2762 bnx2x_napi_disable(bp); 2762 bnx2x_napi_disable(bp);
2763 bnx2x_del_all_napi(bp);
2763 2764
2764 /* clear pf_load status, as it was already set */ 2765 /* clear pf_load status, as it was already set */
2765 if (IS_PF(bp)) 2766 if (IS_PF(bp))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 568205436a15..91ecd6a00d05 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -2139,12 +2139,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
2139 break; 2139 break;
2140 default: 2140 default:
2141 BNX2X_ERR("Non valid capability ID\n"); 2141 BNX2X_ERR("Non valid capability ID\n");
2142 rval = -EINVAL; 2142 rval = 1;
2143 break; 2143 break;
2144 } 2144 }
2145 } else { 2145 } else {
2146 DP(BNX2X_MSG_DCB, "DCB disabled\n"); 2146 DP(BNX2X_MSG_DCB, "DCB disabled\n");
2147 rval = -EINVAL; 2147 rval = 1;
2148 } 2148 }
2149 2149
2150 DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap); 2150 DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
@@ -2170,12 +2170,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
2170 break; 2170 break;
2171 default: 2171 default:
2172 BNX2X_ERR("Non valid TC-ID\n"); 2172 BNX2X_ERR("Non valid TC-ID\n");
2173 rval = -EINVAL; 2173 rval = 1;
2174 break; 2174 break;
2175 } 2175 }
2176 } else { 2176 } else {
2177 DP(BNX2X_MSG_DCB, "DCB disabled\n"); 2177 DP(BNX2X_MSG_DCB, "DCB disabled\n");
2178 rval = -EINVAL; 2178 rval = 1;
2179 } 2179 }
2180 2180
2181 return rval; 2181 return rval;
@@ -2188,7 +2188,7 @@ static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
2188 return -EINVAL; 2188 return -EINVAL;
2189} 2189}
2190 2190
2191static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) 2191static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
2192{ 2192{
2193 struct bnx2x *bp = netdev_priv(netdev); 2193 struct bnx2x *bp = netdev_priv(netdev);
2194 DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); 2194 DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
@@ -2390,12 +2390,12 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
2390 break; 2390 break;
2391 default: 2391 default:
2392 BNX2X_ERR("Non valid featrue-ID\n"); 2392 BNX2X_ERR("Non valid featrue-ID\n");
2393 rval = -EINVAL; 2393 rval = 1;
2394 break; 2394 break;
2395 } 2395 }
2396 } else { 2396 } else {
2397 DP(BNX2X_MSG_DCB, "DCB disabled\n"); 2397 DP(BNX2X_MSG_DCB, "DCB disabled\n");
2398 rval = -EINVAL; 2398 rval = 1;
2399 } 2399 }
2400 2400
2401 return rval; 2401 return rval;
@@ -2431,12 +2431,12 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
2431 break; 2431 break;
2432 default: 2432 default:
2433 BNX2X_ERR("Non valid featrue-ID\n"); 2433 BNX2X_ERR("Non valid featrue-ID\n");
2434 rval = -EINVAL; 2434 rval = 1;
2435 break; 2435 break;
2436 } 2436 }
2437 } else { 2437 } else {
2438 DP(BNX2X_MSG_DCB, "dcbnl call not valid\n"); 2438 DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
2439 rval = -EINVAL; 2439 rval = 1;
2440 } 2440 }
2441 2441
2442 return rval; 2442 return rval;
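
The bnx2x_dcb.c changes look cosmetic but are a type bug: these dcbnl callbacks return u8, so -EINVAL (-22) is silently truncated to 234 and never reads as the nonzero "failure" value 1 the callers check for. A two-line demonstration of the truncation:

#include <stdio.h>

typedef unsigned char u8;

static u8 getcap_errno(void) { return -22; }    /* -EINVAL through a u8 */
static u8 getcap_fixed(void) { return 1; }      /* the dcbnl failure value */

int main(void)
{
        /* A negative errno cannot survive a u8 return type: */
        printf("-EINVAL via u8 -> %d\n", getcap_errno());       /* 234 */
        printf("fixed failure  -> %d\n", getcap_fixed());       /* 1   */
        return 0;
}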
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 364e37ecbc5c..198f6f1c9ad5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -459,8 +459,9 @@ struct bnx2x_fw_port_stats_old {
459 459
460#define UPDATE_QSTAT(s, t) \ 460#define UPDATE_QSTAT(s, t) \
461 do { \ 461 do { \
462 qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \
463 qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \ 462 qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
463 qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
464 + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
464 } while (0) 465 } while (0)
465 466
466#define UPDATE_QSTAT_OLD(f) \ 467#define UPDATE_QSTAT_OLD(f) \
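
The UPDATE_QSTAT reorder is a carry-propagation fix for 64-bit counters kept as two 32-bit halves: the low word must be summed first so a wraparound there can feed a carry into the high word; the old order added the high words before the carry was knowable. A compilable version of the fixed arithmetic (struct and function names invented):

#include <stdint.h>
#include <stdio.h>

struct split64 { uint32_t hi, lo; };

/* Accumulate a 32/32 split counter: lo first, then hi plus the carry
 * detected from lo's wraparound, exactly the order the fix enforces. */
static void accumulate(struct split64 *q, const struct split64 *old,
                       uint32_t add_hi, uint32_t add_lo)
{
        q->lo = old->lo + add_lo;
        q->hi = old->hi + add_hi + (q->lo < old->lo ? 1 : 0);
}

int main(void)
{
        struct split64 old = { .hi = 0, .lo = 0xFFFFFFF0u }, q;

        accumulate(&q, &old, 0, 0x20);  /* lo wraps, carry must reach hi */
        printf("hi=%u lo=%#x (as u64: %llu)\n", q.hi, q.lo,
               ((unsigned long long)q.hi << 32) | q.lo);
        return 0;
}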
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 93729f942358..67d2663b3974 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4130,6 +4130,14 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
4130 tp->link_config.active_speed = tp->link_config.speed; 4130 tp->link_config.active_speed = tp->link_config.speed;
4131 tp->link_config.active_duplex = tp->link_config.duplex; 4131 tp->link_config.active_duplex = tp->link_config.duplex;
4132 4132
4133 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4134 /* With autoneg disabled, 5715 only links up when the
4135 * advertisement register has the configured speed
4136 * enabled.
4137 */
4138 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4139 }
4140
4133 bmcr = 0; 4141 bmcr = 0;
4134 switch (tp->link_config.speed) { 4142 switch (tp->link_config.speed) {
4135 default: 4143 default:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4ce62031f62f..8049268ce0f2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -497,8 +497,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
497} 497}
498 498
499#define EEPROM_STAT_ADDR 0x7bfc 499#define EEPROM_STAT_ADDR 0x7bfc
500#define VPD_BASE 0
501#define VPD_LEN 512 500#define VPD_LEN 512
501#define VPD_BASE 0x400
502#define VPD_BASE_OLD 0
502 503
503/** 504/**
504 * t4_seeprom_wp - enable/disable EEPROM write protection 505 * t4_seeprom_wp - enable/disable EEPROM write protection
@@ -524,7 +525,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
524int get_vpd_params(struct adapter *adapter, struct vpd_params *p) 525int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
525{ 526{
526 u32 cclk_param, cclk_val; 527 u32 cclk_param, cclk_val;
527 int i, ret; 528 int i, ret, addr;
528 int ec, sn; 529 int ec, sn;
529 u8 *vpd, csum; 530 u8 *vpd, csum;
530 unsigned int vpdr_len, kw_offset, id_len; 531 unsigned int vpdr_len, kw_offset, id_len;
@@ -533,7 +534,12 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
533 if (!vpd) 534 if (!vpd)
534 return -ENOMEM; 535 return -ENOMEM;
535 536
536 ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd); 537 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
538 if (ret < 0)
539 goto out;
540 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
541
542 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
537 if (ret < 0) 543 if (ret < 0)
538 goto out; 544 goto out;
539 545
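
The cxgb4 VPD change probes before reading: newer cards keep the VPD at 0x400, and a valid VPD block starts with 0x82, the PCI VPD large-resource tag for the ID string, so one small read decides between the new and legacy base. A sketch of that probe over a buffer standing in for pci_read_vpd() (layout invented for the demo):

#include <stdint.h>
#include <stdio.h>

#define VPD_BASE     0x400
#define VPD_BASE_OLD 0x0

/* 0x82 == PCI VPD large-resource ID-string tag: if it is present at
 * the new base, use it; otherwise fall back to the legacy base. */
static unsigned int vpd_base(const uint8_t *eeprom)
{
        return eeprom[VPD_BASE] == 0x82 ? VPD_BASE : VPD_BASE_OLD;
}

int main(void)
{
        static uint8_t eeprom[0x800];

        eeprom[VPD_BASE] = 0x82;
        printf("VPD at %#x\n", vpd_base(eeprom));
        return 0;
}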
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 0c37fb2cc867..1df33c799c00 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -108,6 +108,7 @@ config TULIP_DM910X
108config DE4X5 108config DE4X5
109 tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" 109 tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
110 depends on (PCI || EISA) 110 depends on (PCI || EISA)
111 depends on VIRT_TO_BUS || ALPHA || PPC || SPARC
111 select CRC32 112 select CRC32
112 ---help--- 113 ---help---
113 This is support for the DIGITAL series of PCI/EISA Ethernet cards. 114 This is support for the DIGITAL series of PCI/EISA Ethernet cards.
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 069a155d16ed..911d0253dbb2 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -934,24 +934,28 @@ static void fec_enet_adjust_link(struct net_device *ndev)
934 goto spin_unlock; 934 goto spin_unlock;
935 } 935 }
936 936
937 /* Duplex link change */
938 if (phy_dev->link) { 937 if (phy_dev->link) {
939 if (fep->full_duplex != phy_dev->duplex) { 938 if (!fep->link) {
940 fec_restart(ndev, phy_dev->duplex);
941 /* prevent unnecessary second fec_restart() below */
942 fep->link = phy_dev->link; 939 fep->link = phy_dev->link;
943 status_change = 1; 940 status_change = 1;
944 } 941 }
945 }
946 942
947 /* Link on or off change */ 943 if (fep->full_duplex != phy_dev->duplex)
948 if (phy_dev->link != fep->link) { 944 status_change = 1;
949 fep->link = phy_dev->link; 945
950 if (phy_dev->link) 946 if (phy_dev->speed != fep->speed) {
947 fep->speed = phy_dev->speed;
948 status_change = 1;
949 }
950
951 /* if any of the above changed restart the FEC */
952 if (status_change)
951 fec_restart(ndev, phy_dev->duplex); 953 fec_restart(ndev, phy_dev->duplex);
952 else 954 } else {
955 if (fep->link) {
953 fec_stop(ndev); 956 fec_stop(ndev);
954 status_change = 1; 957 status_change = 1;
958 }
955 } 959 }
956 960
957spin_unlock: 961spin_unlock:
@@ -1328,7 +1332,7 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1328static void fec_enet_free_buffers(struct net_device *ndev) 1332static void fec_enet_free_buffers(struct net_device *ndev)
1329{ 1333{
1330 struct fec_enet_private *fep = netdev_priv(ndev); 1334 struct fec_enet_private *fep = netdev_priv(ndev);
1331 int i; 1335 unsigned int i;
1332 struct sk_buff *skb; 1336 struct sk_buff *skb;
1333 struct bufdesc *bdp; 1337 struct bufdesc *bdp;
1334 1338
@@ -1352,7 +1356,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
1352static int fec_enet_alloc_buffers(struct net_device *ndev) 1356static int fec_enet_alloc_buffers(struct net_device *ndev)
1353{ 1357{
1354 struct fec_enet_private *fep = netdev_priv(ndev); 1358 struct fec_enet_private *fep = netdev_priv(ndev);
1355 int i; 1359 unsigned int i;
1356 struct sk_buff *skb; 1360 struct sk_buff *skb;
1357 struct bufdesc *bdp; 1361 struct bufdesc *bdp;
1358 1362
@@ -1437,6 +1441,7 @@ fec_enet_close(struct net_device *ndev)
1437 struct fec_enet_private *fep = netdev_priv(ndev); 1441 struct fec_enet_private *fep = netdev_priv(ndev);
1438 1442
1439 /* Don't know what to do yet. */ 1443 /* Don't know what to do yet. */
1444 napi_disable(&fep->napi);
1440 fep->opened = 0; 1445 fep->opened = 0;
1441 netif_stop_queue(ndev); 1446 netif_stop_queue(ndev);
1442 fec_stop(ndev); 1447 fec_stop(ndev);
@@ -1593,7 +1598,7 @@ static int fec_enet_init(struct net_device *ndev)
1593 struct fec_enet_private *fep = netdev_priv(ndev); 1598 struct fec_enet_private *fep = netdev_priv(ndev);
1594 struct bufdesc *cbd_base; 1599 struct bufdesc *cbd_base;
1595 struct bufdesc *bdp; 1600 struct bufdesc *bdp;
1596 int i; 1601 unsigned int i;
1597 1602
1598 /* Allocate memory for buffer descriptors. */ 1603 /* Allocate memory for buffer descriptors. */
1599 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, 1604 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
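
The fec_enet_adjust_link rework folds three separate triggers (link coming up, duplex change, and the newly tracked speed change) into one status_change flag, so the MAC is restarted at most once per PHY update instead of potentially twice. A self-contained model of that consolidated shape (fields and stubs invented; the real restart takes the duplex setting as an argument):

#include <stdio.h>

struct link_state { int link, duplex, speed; };

static void restart(void) { puts("fec_restart"); }
static void stop(void)    { puts("fec_stop"); }

/* Collapse link/duplex/speed changes into one flag, restart once. */
static void adjust_link(struct link_state *fep, const struct link_state *phy)
{
        int status_change = 0;

        if (phy->link) {
                if (!fep->link) {
                        fep->link = 1;
                        status_change = 1;
                }
                if (fep->duplex != phy->duplex) {
                        fep->duplex = phy->duplex;
                        status_change = 1;
                }
                if (fep->speed != phy->speed) {
                        fep->speed = phy->speed;
                        status_change = 1;
                }
                if (status_change)
                        restart();
        } else if (fep->link) {
                fep->link = 0;
                stop();
        }
}

int main(void)
{
        struct link_state fep = { 0, 0, 0 }, phy = { 1, 1, 100 };

        adjust_link(&fep, &phy);        /* one restart despite three changes */
        adjust_link(&fep, &phy);        /* no change -> no restart */
        return 0;
}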
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index f5390071efd0..eb4372962839 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -240,6 +240,7 @@ struct fec_enet_private {
240 phy_interface_t phy_interface; 240 phy_interface_t phy_interface;
241 int link; 241 int link;
242 int full_duplex; 242 int full_duplex;
243 int speed;
243 struct completion mdio_done; 244 struct completion mdio_done;
244 int irq[FEC_IRQ_NUM]; 245 int irq[FEC_IRQ_NUM];
245 int bufdesc_ex; 246 int bufdesc_ex;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 1f17ca0f2201..0d8df400a479 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -128,6 +128,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
128 128
129 spin_unlock_irqrestore(&fep->tmreg_lock, flags); 129 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
130} 130}
131EXPORT_SYMBOL(fec_ptp_start_cyclecounter);
131 132
132/** 133/**
133 * fec_ptp_adjfreq - adjust ptp cycle frequency 134 * fec_ptp_adjfreq - adjust ptp cycle frequency
@@ -318,6 +319,7 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
318 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 319 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
319 -EFAULT : 0; 320 -EFAULT : 0;
320} 321}
322EXPORT_SYMBOL(fec_ptp_ioctl);
321 323
322/** 324/**
323 * fec_time_keep - call timecounter_read every second to avoid timer overrun 325 * fec_time_keep - call timecounter_read every second to avoid timer overrun
@@ -383,3 +385,4 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
383 pr_info("registered PHC device on %s\n", ndev->name); 385 pr_info("registered PHC device on %s\n", ndev->name);
384 } 386 }
385} 387}
388EXPORT_SYMBOL(fec_ptp_init);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index b64542acfa34..12b1d8480808 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1818,27 +1818,32 @@ out:
1818 **/ 1818 **/
1819void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) 1819void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
1820{ 1820{
1821 u32 dtxswc; 1821 u32 reg_val, reg_offset;
1822 1822
1823 switch (hw->mac.type) { 1823 switch (hw->mac.type) {
1824 case e1000_82576: 1824 case e1000_82576:
1825 reg_offset = E1000_DTXSWC;
1826 break;
1825 case e1000_i350: 1827 case e1000_i350:
1826 dtxswc = rd32(E1000_DTXSWC); 1828 reg_offset = E1000_TXSWC;
1827 if (enable) {
1828 dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
1829 E1000_DTXSWC_VLAN_SPOOF_MASK);
1830 /* The PF can spoof - it has to in order to
1831 * support emulation mode NICs */
1832 dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
1833 } else {
1834 dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
1835 E1000_DTXSWC_VLAN_SPOOF_MASK);
1836 }
1837 wr32(E1000_DTXSWC, dtxswc);
1838 break; 1829 break;
1839 default: 1830 default:
1840 break; 1831 return;
1832 }
1833
1834 reg_val = rd32(reg_offset);
1835 if (enable) {
1836 reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
1837 E1000_DTXSWC_VLAN_SPOOF_MASK);
1838 /* The PF can spoof - it has to in order to
1839 * support emulation mode NICs
1840 */
1841 reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
1842 } else {
1843 reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
1844 E1000_DTXSWC_VLAN_SPOOF_MASK);
1841 } 1845 }
1846 wr32(reg_offset, reg_val);
1842} 1847}
1843 1848
1844/** 1849/**
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 4623502054d5..0478a1abe541 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -39,7 +39,7 @@
39#include <linux/pci.h> 39#include <linux/pci.h>
40 40
41#ifdef CONFIG_IGB_HWMON 41#ifdef CONFIG_IGB_HWMON
42struct i2c_board_info i350_sensor_info = { 42static struct i2c_board_info i350_sensor_info = {
43 I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), 43 I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
44}; 44};
45 45
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 4dbd62968c7a..8496adfc6a68 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2542,8 +2542,8 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
2542 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) 2542 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
2543 return; 2543 return;
2544 2544
2545 igb_enable_sriov(pdev, max_vfs);
2546 pci_sriov_set_totalvfs(pdev, 7); 2545 pci_sriov_set_totalvfs(pdev, 7);
2546 igb_enable_sriov(pdev, max_vfs);
2547 2547
2548#endif /* CONFIG_PCI_IOV */ 2548#endif /* CONFIG_PCI_IOV */
2549} 2549}
@@ -2652,7 +2652,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
2652 if (max_vfs > 7) { 2652 if (max_vfs > 7) {
2653 dev_warn(&pdev->dev, 2653 dev_warn(&pdev->dev,
2654 "Maximum of 7 VFs per PF, using max\n"); 2654 "Maximum of 7 VFs per PF, using max\n");
2655 adapter->vfs_allocated_count = 7; 2655 max_vfs = adapter->vfs_allocated_count = 7;
2656 } else 2656 } else
2657 adapter->vfs_allocated_count = max_vfs; 2657 adapter->vfs_allocated_count = max_vfs;
2658 if (adapter->vfs_allocated_count) 2658 if (adapter->vfs_allocated_count)
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 0987822359f0..0a237507ee85 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -740,7 +740,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
740 case e1000_82576: 740 case e1000_82576:
741 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); 741 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
742 adapter->ptp_caps.owner = THIS_MODULE; 742 adapter->ptp_caps.owner = THIS_MODULE;
743 adapter->ptp_caps.max_adj = 1000000000; 743 adapter->ptp_caps.max_adj = 999999881;
744 adapter->ptp_caps.n_ext_ts = 0; 744 adapter->ptp_caps.n_ext_ts = 0;
745 adapter->ptp_caps.pps = 0; 745 adapter->ptp_caps.pps = 0;
746 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; 746 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index c3db6cd69b68..2b6cb5ca48ee 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -944,9 +944,17 @@ free_queue_irqs:
944 free_irq(adapter->msix_entries[vector].vector, 944 free_irq(adapter->msix_entries[vector].vector,
945 adapter->q_vector[vector]); 945 adapter->q_vector[vector]);
946 } 946 }
947 pci_disable_msix(adapter->pdev); 947 /* This failure is non-recoverable - it indicates the system is
948 kfree(adapter->msix_entries); 948 * out of MSIX vector resources and the VF driver cannot run
949 adapter->msix_entries = NULL; 949 * without them. Set the number of msix vectors to zero
950 * indicating that not enough can be allocated. The error
951 * will be returned to the user indicating device open failed.
952 * Any further attempts to force the driver to open will also
953 * fail. The only way to recover is to unload the driver and
954 * reload it again. If the system has recovered some MSIX
955 * vectors then it may succeed.
956 */
957 adapter->num_msix_vectors = 0;
950 return err; 958 return err;
951} 959}
952 960
@@ -2572,6 +2580,15 @@ static int ixgbevf_open(struct net_device *netdev)
2572 struct ixgbe_hw *hw = &adapter->hw; 2580 struct ixgbe_hw *hw = &adapter->hw;
2573 int err; 2581 int err;
2574 2582
2583 /* A previous failure to open the device because of a lack of
2584 * available MSIX vector resources may have reset the number
2585 * of msix vectors variable to zero. The only way to recover
2586 * is to unload/reload the driver and hope that the system has
2587 * been able to recover some MSIX vector resources.
2588 */
2589 if (!adapter->num_msix_vectors)
2590 return -ENOMEM;
2591
2575 /* disallow open during test */ 2592 /* disallow open during test */
2576 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 2593 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2577 return -EBUSY; 2594 return -EBUSY;
@@ -2628,7 +2645,6 @@ static int ixgbevf_open(struct net_device *netdev)
2628 2645
2629err_req_irq: 2646err_req_irq:
2630 ixgbevf_down(adapter); 2647 ixgbevf_down(adapter);
2631 ixgbevf_free_irq(adapter);
2632err_setup_rx: 2648err_setup_rx:
2633 ixgbevf_free_all_rx_resources(adapter); 2649 ixgbevf_free_all_rx_resources(adapter);
2634err_setup_tx: 2650err_setup_tx:
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6a2127489af7..bfdb06860397 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -769,7 +769,7 @@ ltq_etop_probe(struct platform_device *pdev)
769 return 0; 769 return 0;
770 770
771err_free: 771err_free:
772 kfree(dev); 772 free_netdev(dev);
773err_out: 773err_out:
774 return err; 774 return err;
775} 775}
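
The one-line lantiq fix swaps kfree(dev) for free_netdev(dev). A net_device from alloc_etherdev() is an aligned object inside a larger allocation, so the pointer handed to kfree() is not the pointer the allocator returned; free_netdev() recovers the original from the recorded padding (dev->padded) and also releases the device's other state. A userspace model of why the pairing matters (simplified; the alignment and field name mirror the kernel's scheme):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define ALIGN_UP(p, a) (((uintptr_t)(p) + (a) - 1) & ~(uintptr_t)((a) - 1))

struct netdev { unsigned padded; char priv[64]; };

/* alloc_etherdev-style: the returned object is aligned inside a larger
 * allocation, with the padding remembered for the matching free. */
static struct netdev *netdev_alloc(void)
{
        char *raw = malloc(sizeof(struct netdev) + 32);
        struct netdev *dev = (struct netdev *)ALIGN_UP(raw, 32);

        dev->padded = (unsigned)((char *)dev - raw);
        return dev;
}

static void netdev_free(struct netdev *dev)
{
        /* What free_netdev() does: recover the real allocation start.
         * free(dev) itself -- the kfree(dev) bug -- would hand the
         * allocator an interior pointer it never returned. */
        free((char *)dev - dev->padded);
}

int main(void)
{
        struct netdev *dev = netdev_alloc();

        printf("padding %u bytes\n", dev->padded);
        netdev_free(dev);
        return 0;
}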
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 995d4b6d5c1e..f278b10ef714 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1637,6 +1637,17 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
1637 /* Flush multicast filter */ 1637 /* Flush multicast filter */
1638 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); 1638 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1639 1639
1640 /* Remove flow steering rules for the port*/
1641 if (mdev->dev->caps.steering_mode ==
1642 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1643 ASSERT_RTNL();
1644 list_for_each_entry_safe(flow, tmp_flow,
1645 &priv->ethtool_list, list) {
1646 mlx4_flow_detach(mdev->dev, flow->id);
1647 list_del(&flow->list);
1648 }
1649 }
1650
1640 mlx4_en_destroy_drop_qp(priv); 1651 mlx4_en_destroy_drop_qp(priv);
1641 1652
1642 /* Free TX Rings */ 1653 /* Free TX Rings */
@@ -1657,17 +1668,6 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
1657 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN)) 1668 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
1658 mdev->mac_removed[priv->port] = 1; 1669 mdev->mac_removed[priv->port] = 1;
1659 1670
1660 /* Remove flow steering rules for the port*/
1661 if (mdev->dev->caps.steering_mode ==
1662 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1663 ASSERT_RTNL();
1664 list_for_each_entry_safe(flow, tmp_flow,
1665 &priv->ethtool_list, list) {
1666 mlx4_flow_detach(mdev->dev, flow->id);
1667 list_del(&flow->list);
1668 }
1669 }
1670
1671 /* Free RX Rings */ 1671 /* Free RX Rings */
1672 for (i = 0; i < priv->rx_ring_num; i++) { 1672 for (i = 0; i < priv->rx_ring_num; i++) {
1673 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); 1673 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 251ae2f93116..8e3123a1df88 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -771,7 +771,7 @@ int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
771 struct mlx4_slave_event_eq_info *event_eq = 771 struct mlx4_slave_event_eq_info *event_eq =
772 priv->mfunc.master.slave_state[slave].event_eq; 772 priv->mfunc.master.slave_state[slave].event_eq;
773 u32 in_modifier = vhcr->in_modifier; 773 u32 in_modifier = vhcr->in_modifier;
774 u32 eqn = in_modifier & 0x1FF; 774 u32 eqn = in_modifier & 0x3FF;
775 u64 in_param = vhcr->in_param; 775 u64 in_param = vhcr->in_param;
776 int err = 0; 776 int err = 0;
777 int i; 777 int i;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 2995687f1aee..1391b52f443a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -99,6 +99,7 @@ struct res_qp {
99 struct list_head mcg_list; 99 struct list_head mcg_list;
100 spinlock_t mcg_spl; 100 spinlock_t mcg_spl;
101 int local_qpn; 101 int local_qpn;
102 atomic_t ref_count;
102}; 103};
103 104
104enum res_mtt_states { 105enum res_mtt_states {
@@ -197,6 +198,7 @@ enum res_fs_rule_states {
197 198
198struct res_fs_rule { 199struct res_fs_rule {
199 struct res_common com; 200 struct res_common com;
201 int qpn;
200}; 202};
201 203
202static void *res_tracker_lookup(struct rb_root *root, u64 res_id) 204static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
@@ -355,7 +357,7 @@ static int mpt_mask(struct mlx4_dev *dev)
355 return dev->caps.num_mpts - 1; 357 return dev->caps.num_mpts - 1;
356} 358}
357 359
358static void *find_res(struct mlx4_dev *dev, int res_id, 360static void *find_res(struct mlx4_dev *dev, u64 res_id,
359 enum mlx4_resource type) 361 enum mlx4_resource type)
360{ 362{
361 struct mlx4_priv *priv = mlx4_priv(dev); 363 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -447,6 +449,7 @@ static struct res_common *alloc_qp_tr(int id)
447 ret->local_qpn = id; 449 ret->local_qpn = id;
448 INIT_LIST_HEAD(&ret->mcg_list); 450 INIT_LIST_HEAD(&ret->mcg_list);
449 spin_lock_init(&ret->mcg_spl); 451 spin_lock_init(&ret->mcg_spl);
452 atomic_set(&ret->ref_count, 0);
450 453
451 return &ret->com; 454 return &ret->com;
452} 455}
@@ -554,7 +557,7 @@ static struct res_common *alloc_xrcdn_tr(int id)
554 return &ret->com; 557 return &ret->com;
555} 558}
556 559
557static struct res_common *alloc_fs_rule_tr(u64 id) 560static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
558{ 561{
559 struct res_fs_rule *ret; 562 struct res_fs_rule *ret;
560 563
@@ -564,7 +567,7 @@ static struct res_common *alloc_fs_rule_tr(u64 id)
564 567
565 ret->com.res_id = id; 568 ret->com.res_id = id;
566 ret->com.state = RES_FS_RULE_ALLOCATED; 569 ret->com.state = RES_FS_RULE_ALLOCATED;
567 570 ret->qpn = qpn;
568 return &ret->com; 571 return &ret->com;
569} 572}
570 573
@@ -602,7 +605,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
602 ret = alloc_xrcdn_tr(id); 605 ret = alloc_xrcdn_tr(id);
603 break; 606 break;
604 case RES_FS_RULE: 607 case RES_FS_RULE:
605 ret = alloc_fs_rule_tr(id); 608 ret = alloc_fs_rule_tr(id, extra);
606 break; 609 break;
607 default: 610 default:
608 return NULL; 611 return NULL;
@@ -671,10 +674,14 @@ undo:
671 674
672static int remove_qp_ok(struct res_qp *res) 675static int remove_qp_ok(struct res_qp *res)
673{ 676{
674 if (res->com.state == RES_QP_BUSY) 677 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
678 !list_empty(&res->mcg_list)) {
679 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
680 res->com.state, atomic_read(&res->ref_count));
675 return -EBUSY; 681 return -EBUSY;
676 else if (res->com.state != RES_QP_RESERVED) 682 } else if (res->com.state != RES_QP_RESERVED) {
677 return -EPERM; 683 return -EPERM;
684 }
678 685
679 return 0; 686 return 0;
680} 687}
@@ -3124,6 +3131,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3124 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; 3131 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3125 int err; 3132 int err;
3126 int qpn; 3133 int qpn;
3134 struct res_qp *rqp;
3127 struct mlx4_net_trans_rule_hw_ctrl *ctrl; 3135 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3128 struct _rule_hw *rule_header; 3136 struct _rule_hw *rule_header;
3129 int header_id; 3137 int header_id;
@@ -3134,7 +3142,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3134 3142
3135 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; 3143 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3136 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; 3144 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3137 err = get_res(dev, slave, qpn, RES_QP, NULL); 3145 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3138 if (err) { 3146 if (err) {
3139 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); 3147 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3140 return err; 3148 return err;
@@ -3175,14 +3183,16 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3175 if (err) 3183 if (err)
3176 goto err_put; 3184 goto err_put;
3177 3185
3178 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0); 3186 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3179 if (err) { 3187 if (err) {
3180 mlx4_err(dev, "Failed to add flow steering resources.\n"); 3188 mlx4_err(dev, "Failed to add flow steering resources.\n");
3181 /* detach rule*/ 3189 /* detach rule*/
3182 mlx4_cmd(dev, vhcr->out_param, 0, 0, 3190 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3183 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 3191 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3184 MLX4_CMD_NATIVE); 3192 MLX4_CMD_NATIVE);
3193 goto err_put;
3185 } 3194 }
3195 atomic_inc(&rqp->ref_count);
3186err_put: 3196err_put:
3187 put_res(dev, slave, qpn, RES_QP); 3197 put_res(dev, slave, qpn, RES_QP);
3188 return err; 3198 return err;
@@ -3195,20 +3205,35 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3195 struct mlx4_cmd_info *cmd) 3205 struct mlx4_cmd_info *cmd)
3196{ 3206{
3197 int err; 3207 int err;
3208 struct res_qp *rqp;
3209 struct res_fs_rule *rrule;
3198 3210
3199 if (dev->caps.steering_mode != 3211 if (dev->caps.steering_mode !=
3200 MLX4_STEERING_MODE_DEVICE_MANAGED) 3212 MLX4_STEERING_MODE_DEVICE_MANAGED)
3201 return -EOPNOTSUPP; 3213 return -EOPNOTSUPP;
3202 3214
3215 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3216 if (err)
3217 return err;
3218 /* Release the rule from busy state before removal */
3219 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3220 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3221 if (err)
3222 return err;
3223
3203 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); 3224 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3204 if (err) { 3225 if (err) {
3205 mlx4_err(dev, "Failed to remove flow steering resources.\n"); 3226 mlx4_err(dev, "Failed to remove flow steering resources.\n");
3206 return err; 3227 goto out;
3207 } 3228 }
3208 3229
3209 err = mlx4_cmd(dev, vhcr->in_param, 0, 0, 3230 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3210 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 3231 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3211 MLX4_CMD_NATIVE); 3232 MLX4_CMD_NATIVE);
3233 if (!err)
3234 atomic_dec(&rqp->ref_count);
3235out:
3236 put_res(dev, slave, rrule->qpn, RES_QP);
3212 return err; 3237 return err;
3213} 3238}
3214 3239
@@ -3806,6 +3831,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3806 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 3831 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3807 /*VLAN*/ 3832 /*VLAN*/
3808 rem_slave_macs(dev, slave); 3833 rem_slave_macs(dev, slave);
3834 rem_slave_fs_rule(dev, slave);
3809 rem_slave_qps(dev, slave); 3835 rem_slave_qps(dev, slave);
3810 rem_slave_srqs(dev, slave); 3836 rem_slave_srqs(dev, slave);
3811 rem_slave_cqs(dev, slave); 3837 rem_slave_cqs(dev, slave);
@@ -3814,6 +3840,5 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3814 rem_slave_mtts(dev, slave); 3840 rem_slave_mtts(dev, slave);
3815 rem_slave_counters(dev, slave); 3841 rem_slave_counters(dev, slave);
3816 rem_slave_xrcdns(dev, slave); 3842 rem_slave_xrcdns(dev, slave);
3817 rem_slave_fs_rule(dev, slave);
3818 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 3843 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3819} 3844}
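
The resource_tracker changes pin a QP for as long as flow-steering rules target it: each attached rule records its qpn and takes a reference, detach drops the reference, and remove_qp_ok() now refuses to destroy a QP whose count is still non-zero. A compilable sketch of that reference-count discipline using C11 atomics (all names illustrative):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct qp {
        atomic_int ref_count;   /* rules currently steering into this QP */
    };

    struct fs_rule {
        struct qp *qp;          /* recorded at attach so detach can drop it */
    };

    static void rule_attach(struct fs_rule *r, struct qp *qp)
    {
        r->qp = qp;
        atomic_fetch_add(&qp->ref_count, 1);
    }

    static void rule_detach(struct fs_rule *r)
    {
        atomic_fetch_sub(&r->qp->ref_count, 1);
        r->qp = NULL;
    }

    /* Mirrors remove_qp_ok(): destruction fails while rules still point here. */
    static int qp_remove_ok(struct qp *qp)
    {
        return atomic_load(&qp->ref_count) ? -EBUSY : 0;
    }

    int main(void)
    {
        struct qp qp = { .ref_count = 0 };
        struct fs_rule rule;

        rule_attach(&rule, &qp);
        printf("%d ", qp_remove_ok(&qp));   /* -16 (EBUSY) */
        rule_detach(&rule);
        printf("%d\n", qp_remove_ok(&qp));  /* 0 */
        return 0;
    }
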
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index c4122c86f829..efa29b712d5f 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1472,7 +1472,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1472 } 1472 }
1473 platform_set_drvdata(pdev, ndev); 1473 platform_set_drvdata(pdev, ndev);
1474 1474
1475 if (lpc_mii_init(pldat) != 0) 1475 ret = lpc_mii_init(pldat);
1476 if (ret)
1476 goto err_out_unregister_netdev; 1477 goto err_out_unregister_netdev;
1477 1478
1478 netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", 1479 netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 39ab4d09faaa..73ce7dd6b954 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1726,9 +1726,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1726 1726
1727 skb->protocol = eth_type_trans(skb, netdev); 1727 skb->protocol = eth_type_trans(skb, netdev);
1728 if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) 1728 if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
1729 skb->ip_summed = CHECKSUM_NONE;
1730 else
1731 skb->ip_summed = CHECKSUM_UNNECESSARY; 1729 skb->ip_summed = CHECKSUM_UNNECESSARY;
1730 else
1731 skb->ip_summed = CHECKSUM_NONE;
1732 1732
1733 napi_gro_receive(&adapter->napi, skb); 1733 napi_gro_receive(&adapter->napi, skb);
1734 (*work_done)++; 1734 (*work_done)++;
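
The pch_gbe hunk swaps two assignments that had the hardware checksum status backwards: TCPIPOK means the MAC already verified the checksum, which maps to CHECKSUM_UNNECESSARY, not CHECKSUM_NONE. A tiny sketch of the corrected mapping (the status bit value is a placeholder, not the real register layout):

    #include <stdio.h>

    enum { CHECKSUM_NONE, CHECKSUM_UNNECESSARY };

    #define STAT_TCPIPOK 0x10   /* placeholder bit, not the real layout */

    /* Hardware verified the checksum, so the stack need not recompute it. */
    static int ip_summed_for(unsigned int tcp_ip_status)
    {
        return (tcp_ip_status & STAT_TCPIPOK) ? CHECKSUM_UNNECESSARY
                                              : CHECKSUM_NONE;
    }

    int main(void)
    {
        printf("%d %d\n", ip_summed_for(STAT_TCPIPOK), ip_summed_for(0)); /* 1 0 */
        return 0;
    }
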
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 33e96176e4d8..bf5e3cf97c4d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2220,6 +2220,7 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2220/* MDIO bus release function */ 2220/* MDIO bus release function */
2221static int sh_mdio_release(struct net_device *ndev) 2221static int sh_mdio_release(struct net_device *ndev)
2222{ 2222{
2223 struct sh_eth_private *mdp = netdev_priv(ndev);
2223 struct mii_bus *bus = dev_get_drvdata(&ndev->dev); 2224 struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
2224 2225
2225 /* unregister mdio bus */ 2226 /* unregister mdio bus */
@@ -2234,6 +2235,9 @@ static int sh_mdio_release(struct net_device *ndev)
2234 /* free bitbang info */ 2235 /* free bitbang info */
2235 free_mdio_bitbang(bus); 2236 free_mdio_bitbang(bus);
2236 2237
2238 /* free bitbang memory */
2239 kfree(mdp->bitbang);
2240
2237 return 0; 2241 return 0;
2238} 2242}
2239 2243
@@ -2262,6 +2266,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
2262 bitbang->ctrl.ops = &bb_ops; 2266 bitbang->ctrl.ops = &bb_ops;
2263 2267
2264 /* MII controller setting */ 2268 /* MII controller setting */
2269 mdp->bitbang = bitbang;
2265 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); 2270 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2266 if (!mdp->mii_bus) { 2271 if (!mdp->mii_bus) {
2267 ret = -ENOMEM; 2272 ret = -ENOMEM;
@@ -2441,6 +2446,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2441 } 2446 }
2442 mdp->tsu_addr = ioremap(rtsu->start, 2447 mdp->tsu_addr = ioremap(rtsu->start,
2443 resource_size(rtsu)); 2448 resource_size(rtsu));
2449 if (mdp->tsu_addr == NULL) {
2450 ret = -ENOMEM;
2451 dev_err(&pdev->dev, "TSU ioremap failed.\n");
2452 goto out_release;
2453 }
2444 mdp->port = devno % 2; 2454 mdp->port = devno % 2;
2445 ndev->features = NETIF_F_HW_VLAN_FILTER; 2455 ndev->features = NETIF_F_HW_VLAN_FILTER;
2446 } 2456 }
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index bae84fd2e73a..e6655678458e 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -705,6 +705,7 @@ struct sh_eth_private {
705 const u16 *reg_offset; 705 const u16 *reg_offset;
706 void __iomem *addr; 706 void __iomem *addr;
707 void __iomem *tsu_addr; 707 void __iomem *tsu_addr;
708 struct bb_info *bitbang;
708 u32 num_rx_ring; 709 u32 num_rx_ring;
709 u32 num_tx_ring; 710 u32 num_tx_ring;
710 dma_addr_t rx_desc_dma; 711 dma_addr_t rx_desc_dma;
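
The sh_eth change plugs a leak by stashing the bitbang allocation in the private struct at init time so sh_mdio_release() can kfree() it, and it also starts checking the TSU ioremap() result. A userspace sketch of the save-for-teardown half (names illustrative):

    #include <stdlib.h>

    struct bitbang { int pin_state; };

    struct priv {
        struct bitbang *bitbang;  /* saved so teardown can free it later */
    };

    static int mdio_init(struct priv *p)
    {
        p->bitbang = calloc(1, sizeof(*p->bitbang));
        if (!p->bitbang)
            return -1;
        /* ... register the bus built on top of the bitbang state ... */
        return 0;
    }

    static void mdio_release(struct priv *p)
    {
        /* Without the pointer saved in priv, this memory leaked on release. */
        free(p->bitbang);
        p->bitbang = NULL;
    }

    int main(void)
    {
        struct priv p = { 0 };

        if (mdio_init(&p) == 0)
            mdio_release(&p);
        return 0;
    }
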
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 0ad790cc473c..eaa8e874a3cb 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -376,7 +376,8 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
376 return false; 376 return false;
377 377
378 tx_queue->empty_read_count = 0; 378 tx_queue->empty_read_count = 0;
379 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; 379 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
380 && tx_queue->write_count - write_count == 1;
380} 381}
381 382
382/* For each entry inserted into the software descriptor ring, create a 383/* For each entry inserted into the software descriptor ring, create a
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 01ffbc486982..df32a090d08e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -905,7 +905,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
905 /* If there is no more tx desc left free then we need to 905 /* If there is no more tx desc left free then we need to
906 * tell the kernel to stop sending us tx frames. 906 * tell the kernel to stop sending us tx frames.
907 */ 907 */
908 if (unlikely(cpdma_check_free_tx_desc(priv->txch))) 908 if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
909 netif_stop_queue(ndev); 909 netif_stop_queue(ndev);
910 910
911 return NETDEV_TX_OK; 911 return NETDEV_TX_OK;
@@ -1364,7 +1364,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1364 struct platform_device *mdio; 1364 struct platform_device *mdio;
1365 1365
1366 parp = of_get_property(slave_node, "phy_id", &lenp); 1366 parp = of_get_property(slave_node, "phy_id", &lenp);
1367 if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) { 1367 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
1368 pr_err("Missing slave[%d] phy_id property\n", i); 1368 pr_err("Missing slave[%d] phy_id property\n", i);
1369 ret = -EINVAL; 1369 ret = -EINVAL;
1370 goto error_ret; 1370 goto error_ret;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 52c05366599a..ae1b77aa199f 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1102,7 +1102,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
1102 /* If there is no more tx desc left free then we need to 1102 /* If there is no more tx desc left free then we need to
1103 * tell the kernel to stop sending us tx frames. 1103 * tell the kernel to stop sending us tx frames.
1104 */ 1104 */
1105 if (unlikely(cpdma_check_free_tx_desc(priv->txchan))) 1105 if (unlikely(!cpdma_check_free_tx_desc(priv->txchan)))
1106 netif_stop_queue(ndev); 1106 netif_stop_queue(ndev);
1107 1107
1108 return NETDEV_TX_OK; 1108 return NETDEV_TX_OK;
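
Both the cpsw and davinci_emac hunks fix the same inverted predicate: cpdma_check_free_tx_desc() reports whether descriptors remain, so the queue must be stopped when it returns false. A small sketch of the intended flow-control rule, assuming that return-value convention:

    #include <stdbool.h>
    #include <stdio.h>

    /* Assumed convention: returns true while the ring still has room. */
    static bool check_free_tx_desc(int free_count)
    {
        return free_count > 0;
    }

    static const char *xmit(int free_count)
    {
        /* Stop accepting frames only when the ring is exhausted; testing
         * the unnegated predicate stopped the queue while room remained. */
        if (!check_free_tx_desc(free_count))
            return "stop queue";
        return "keep sending";
    }

    int main(void)
    {
        printf("%s / %s\n", xmit(4), xmit(0)); /* keep sending / stop queue */
        return 0;
    }
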
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 37add21a3d7d..59ac143dec25 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -666,6 +666,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
666 goto done; 666 goto done;
667 667
668 spin_lock_irqsave(&target_list_lock, flags); 668 spin_lock_irqsave(&target_list_lock, flags);
669restart:
669 list_for_each_entry(nt, &target_list, list) { 670 list_for_each_entry(nt, &target_list, list) {
670 netconsole_target_get(nt); 671 netconsole_target_get(nt);
671 if (nt->np.dev == dev) { 672 if (nt->np.dev == dev) {
@@ -678,15 +679,17 @@ static int netconsole_netdev_event(struct notifier_block *this,
678 case NETDEV_UNREGISTER: 679 case NETDEV_UNREGISTER:
679 /* 680 /*
680 * rtnl_lock already held 681 * rtnl_lock already held
682 * we might sleep in __netpoll_cleanup()
681 */ 683 */
682 if (nt->np.dev) { 684 spin_unlock_irqrestore(&target_list_lock, flags);
683 __netpoll_cleanup(&nt->np); 685 __netpoll_cleanup(&nt->np);
684 dev_put(nt->np.dev); 686 spin_lock_irqsave(&target_list_lock, flags);
685 nt->np.dev = NULL; 687 dev_put(nt->np.dev);
686 } 688 nt->np.dev = NULL;
687 nt->enabled = 0; 689 nt->enabled = 0;
688 stopped = true; 690 stopped = true;
689 break; 691 netconsole_target_put(nt);
692 goto restart;
690 } 693 }
691 } 694 }
692 netconsole_target_put(nt); 695 netconsole_target_put(nt);
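
The netconsole rework addresses a sleep-under-spinlock bug: __netpoll_cleanup() may sleep, so the handler now drops target_list_lock around the call, retakes it, and restarts the list walk because the list may have changed meanwhile. A runnable pthread approximation of that unlock/call/relock/restart pattern (a mutex stands in for the spinlock):

    #include <pthread.h>
    #include <stdio.h>

    struct target { int needs_cleanup; struct target *next; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct target t2 = { 0, NULL };
    static struct target t1 = { 1, &t2 };
    static struct target *head = &t1;

    /* Stand-in for __netpoll_cleanup(): may sleep, so it must never be
     * called with the (spin)lock held. */
    static void cleanup(struct target *t)
    {
        t->needs_cleanup = 0;
    }

    static void sweep(void)
    {
        struct target *t;

        pthread_mutex_lock(&list_lock);
    restart:
        for (t = head; t; t = t->next) {
            if (t->needs_cleanup) {
                /* Drop the lock around the sleeping call, retake it, and
                 * restart the walk: the list may have changed meanwhile. */
                pthread_mutex_unlock(&list_lock);
                cleanup(t);
                pthread_mutex_lock(&list_lock);
                goto restart;
            }
        }
        pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
        sweep();
        printf("t1 cleaned: %d\n", t1.needs_cleanup == 0); /* 1 */
        return 0;
    }
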
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3b6e9b83342d..7c769d8e25ad 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -268,7 +268,7 @@ config USB_NET_SMSC75XX
268 select CRC16 268 select CRC16
269 select CRC32 269 select CRC32
270 help 270 help
271 This option adds support for SMSC LAN95XX based USB 2.0 271 This option adds support for SMSC LAN75XX based USB 2.0
272 Gigabit Ethernet adapters. 272 Gigabit Ethernet adapters.
273 273
274config USB_NET_SMSC95XX 274config USB_NET_SMSC95XX
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 248d2dc765a5..16c842997291 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -68,18 +68,9 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
68 struct cdc_ncm_ctx *ctx; 68 struct cdc_ncm_ctx *ctx;
69 struct usb_driver *subdriver = ERR_PTR(-ENODEV); 69 struct usb_driver *subdriver = ERR_PTR(-ENODEV);
70 int ret = -ENODEV; 70 int ret = -ENODEV;
71 u8 data_altsetting = CDC_NCM_DATA_ALTSETTING_NCM; 71 u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf);
72 struct cdc_mbim_state *info = (void *)&dev->data; 72 struct cdc_mbim_state *info = (void *)&dev->data;
73 73
74 /* see if interface supports MBIM alternate setting */
75 if (intf->num_altsetting == 2) {
76 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
77 usb_set_interface(dev->udev,
78 intf->cur_altsetting->desc.bInterfaceNumber,
79 CDC_NCM_COMM_ALTSETTING_MBIM);
80 data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM;
81 }
82
83 /* Probably NCM, defer for cdc_ncm_bind */ 74 /* Probably NCM, defer for cdc_ncm_bind */
84 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 75 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
85 goto err; 76 goto err;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 61b74a2b89ac..4709fa3497cf 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -55,6 +55,14 @@
55 55
56#define DRIVER_VERSION "14-Mar-2012" 56#define DRIVER_VERSION "14-Mar-2012"
57 57
58#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
59static bool prefer_mbim = true;
60#else
61static bool prefer_mbim;
62#endif
63module_param(prefer_mbim, bool, S_IRUGO | S_IWUSR);
64MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions");
65
58static void cdc_ncm_txpath_bh(unsigned long param); 66static void cdc_ncm_txpath_bh(unsigned long param);
59static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); 67static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
60static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); 68static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
@@ -550,9 +558,12 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
550} 558}
551EXPORT_SYMBOL_GPL(cdc_ncm_unbind); 559EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
552 560
553static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) 561/* Select the MBIM altsetting iff it is preferred and available,
562 * returning the number of the corresponding data interface altsetting
563 */
564u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf)
554{ 565{
555 int ret; 566 struct usb_host_interface *alt;
556 567
557 /* The MBIM spec defines a NCM compatible default altsetting, 568 /* The MBIM spec defines a NCM compatible default altsetting,
558 * which we may have matched: 569 * which we may have matched:
@@ -568,23 +579,27 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
568 * endpoint descriptors, shall be constructed according to 579 * endpoint descriptors, shall be constructed according to
569 * the rules given in section 6 (USB Device Model) of this 580 * the rules given in section 6 (USB Device Model) of this
570 * specification." 581 * specification."
571 *
572 * Do not bind to such interfaces, allowing cdc_mbim to handle
573 * them
574 */ 582 */
575#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) 583 if (prefer_mbim && intf->num_altsetting == 2) {
576 if ((intf->num_altsetting == 2) && 584 alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM);
577 !usb_set_interface(dev->udev, 585 if (alt && cdc_ncm_comm_intf_is_mbim(alt) &&
578 intf->cur_altsetting->desc.bInterfaceNumber, 586 !usb_set_interface(dev->udev,
579 CDC_NCM_COMM_ALTSETTING_MBIM)) { 587 intf->cur_altsetting->desc.bInterfaceNumber,
580 if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 588 CDC_NCM_COMM_ALTSETTING_MBIM))
581 return -ENODEV; 589 return CDC_NCM_DATA_ALTSETTING_MBIM;
582 else
583 usb_set_interface(dev->udev,
584 intf->cur_altsetting->desc.bInterfaceNumber,
585 CDC_NCM_COMM_ALTSETTING_NCM);
586 } 590 }
587#endif 591 return CDC_NCM_DATA_ALTSETTING_NCM;
592}
593EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
594
595static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
596{
597 int ret;
598
599 /* MBIM backwards compatible function? */
600 cdc_ncm_select_altsetting(dev, intf);
601 if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
602 return -ENODEV;
588 603
589 /* NCM data altsetting is always 1 */ 604 /* NCM data altsetting is always 1 */
590 ret = cdc_ncm_bind_common(dev, intf, 1); 605 ret = cdc_ncm_bind_common(dev, intf, 1);
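
The cdc_ncm refactor centralizes the NCM/MBIM choice in cdc_ncm_select_altsetting() and makes it tunable via a prefer_mbim module parameter whose default tracks whether the MBIM driver is built. A compile-time sketch of that default-plus-override shape (altsetting constants are placeholders):

    #include <stdbool.h>
    #include <stdio.h>

    /* Default tracks the build configuration, exactly like the patch;
     * in the kernel the value is further overridable via module_param. */
    #ifdef CONFIG_USB_NET_CDC_MBIM
    static bool prefer_mbim = true;
    #else
    static bool prefer_mbim;
    #endif

    enum { ALT_NCM = 1, ALT_MBIM = 2 };  /* placeholder altsetting numbers */

    static int select_altsetting(int num_altsetting, bool mbim_capable)
    {
        if (prefer_mbim && num_altsetting == 2 && mbim_capable)
            return ALT_MBIM;
        return ALT_NCM;     /* NCM remains the safe default */
    }

    int main(void)
    {
        /* Prints 1 (ALT_NCM) unless built with -DCONFIG_USB_NET_CDC_MBIM. */
        printf("%d\n", select_altsetting(2, true));
        return 0;
    }
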
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index efb5c7c33a28..968d5d50751d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -139,16 +139,9 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
139 139
140 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); 140 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
141 141
142 /* control and data is shared? */ 142 /* set up initial state */
143 if (intf->cur_altsetting->desc.bNumEndpoints == 3) { 143 info->control = intf;
144 info->control = intf; 144 info->data = intf;
145 info->data = intf;
146 goto shared;
147 }
148
149 /* else require a single interrupt status endpoint on control intf */
150 if (intf->cur_altsetting->desc.bNumEndpoints != 1)
151 goto err;
152 145
153 /* and a number of CDC descriptors */ 146 /* and a number of CDC descriptors */
154 while (len > 3) { 147 while (len > 3) {
@@ -207,25 +200,14 @@ next_desc:
207 buf += h->bLength; 200 buf += h->bLength;
208 } 201 }
209 202
210 /* did we find all the required ones? */ 203 /* Use separate control and data interfaces if we found a CDC Union */
211 if (!(found & (1 << USB_CDC_HEADER_TYPE)) || 204 if (cdc_union) {
212 !(found & (1 << USB_CDC_UNION_TYPE))) { 205 info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
213 dev_err(&intf->dev, "CDC functional descriptors missing\n"); 206 if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) {
214 goto err; 207 dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n",
215 } 208 cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0);
216 209 goto err;
217 /* verify CDC Union */ 210 }
218 if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) {
219 dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0);
220 goto err;
221 }
222
223 /* need to save these for unbind */
224 info->control = intf;
225 info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
226 if (!info->data) {
227 dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0);
228 goto err;
229 } 211 }
230 212
231 /* errors aren't fatal - we can live with the dynamic address */ 213 /* errors aren't fatal - we can live with the dynamic address */
@@ -235,11 +217,12 @@ next_desc:
235 } 217 }
236 218
237 /* claim data interface and set it up */ 219 /* claim data interface and set it up */
238 status = usb_driver_claim_interface(driver, info->data, dev); 220 if (info->control != info->data) {
239 if (status < 0) 221 status = usb_driver_claim_interface(driver, info->data, dev);
240 goto err; 222 if (status < 0)
223 goto err;
224 }
241 225
242shared:
243 status = qmi_wwan_register_subdriver(dev); 226 status = qmi_wwan_register_subdriver(dev);
244 if (status < 0 && info->control != info->data) { 227 if (status < 0 && info->control != info->data) {
245 usb_set_intfdata(info->data, NULL); 228 usb_set_intfdata(info->data, NULL);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 4cc13940c895..f76c3ca07a45 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1023,6 +1023,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
1023 AR_PHY_AGC_CONTROL_FLTR_CAL | 1023 AR_PHY_AGC_CONTROL_FLTR_CAL |
1024 AR_PHY_AGC_CONTROL_PKDET_CAL; 1024 AR_PHY_AGC_CONTROL_PKDET_CAL;
1025 1025
1026 /* Use chip chainmask only for calibration */
1026 ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask); 1027 ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
1027 1028
1028 if (rtt) { 1029 if (rtt) {
@@ -1150,6 +1151,9 @@ skip_tx_iqcal:
1150 ar9003_hw_rtt_disable(ah); 1151 ar9003_hw_rtt_disable(ah);
1151 } 1152 }
1152 1153
1154 /* Revert chainmask to runtime parameters */
1155 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
1156
1153 /* Initialize list pointers */ 1157 /* Initialize list pointers */
1154 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; 1158 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
1155 1159
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index ade3afb21f91..39c84ecf6a42 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -28,21 +28,21 @@ void ath_tx_complete_poll_work(struct work_struct *work)
28 int i; 28 int i;
29 bool needreset = false; 29 bool needreset = false;
30 30
31 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 31 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
32 if (ATH_TXQ_SETUP(sc, i)) { 32 txq = sc->tx.txq_map[i];
33 txq = &sc->tx.txq[i]; 33
34 ath_txq_lock(sc, txq); 34 ath_txq_lock(sc, txq);
35 if (txq->axq_depth) { 35 if (txq->axq_depth) {
36 if (txq->axq_tx_inprogress) { 36 if (txq->axq_tx_inprogress) {
37 needreset = true; 37 needreset = true;
38 ath_txq_unlock(sc, txq); 38 ath_txq_unlock(sc, txq);
39 break; 39 break;
40 } else { 40 } else {
41 txq->axq_tx_inprogress = true; 41 txq->axq_tx_inprogress = true;
42 }
43 } 42 }
44 ath_txq_unlock_complete(sc, txq);
45 } 43 }
44 ath_txq_unlock_complete(sc, txq);
45 }
46 46
47 if (needreset) { 47 if (needreset) {
48 ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, 48 ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 3630a41df50d..c353b5f19c8c 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -475,6 +475,7 @@ il3945_tx_skb(struct il_priv *il,
475 dma_addr_t txcmd_phys; 475 dma_addr_t txcmd_phys;
476 int txq_id = skb_get_queue_mapping(skb); 476 int txq_id = skb_get_queue_mapping(skb);
477 u16 len, idx, hdr_len; 477 u16 len, idx, hdr_len;
478 u16 firstlen, secondlen;
478 u8 id; 479 u8 id;
479 u8 unicast; 480 u8 unicast;
480 u8 sta_id; 481 u8 sta_id;
@@ -589,21 +590,22 @@ il3945_tx_skb(struct il_priv *il,
589 len = 590 len =
590 sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) + 591 sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
591 hdr_len; 592 hdr_len;
592 len = (len + 3) & ~3; 593 firstlen = (len + 3) & ~3;
593 594
594 /* Physical address of this Tx command's header (not MAC header!), 595 /* Physical address of this Tx command's header (not MAC header!),
595 * within command buffer array. */ 596 * within command buffer array. */
596 txcmd_phys = 597 txcmd_phys =
597 pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE); 598 pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
599 PCI_DMA_TODEVICE);
598 if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys))) 600 if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
599 goto drop_unlock; 601 goto drop_unlock;
600 602
601 /* Set up TFD's 2nd entry to point directly to remainder of skb, 603 /* Set up TFD's 2nd entry to point directly to remainder of skb,
602 * if any (802.11 null frames have no payload). */ 604 * if any (802.11 null frames have no payload). */
603 len = skb->len - hdr_len; 605 secondlen = skb->len - hdr_len;
604 if (len) { 606 if (secondlen > 0) {
605 phys_addr = 607 phys_addr =
606 pci_map_single(il->pci_dev, skb->data + hdr_len, len, 608 pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
607 PCI_DMA_TODEVICE); 609 PCI_DMA_TODEVICE);
608 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) 610 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
609 goto drop_unlock; 611 goto drop_unlock;
@@ -611,12 +613,12 @@ il3945_tx_skb(struct il_priv *il,
611 613
612 /* Add buffer containing Tx command and MAC(!) header to TFD's 614 /* Add buffer containing Tx command and MAC(!) header to TFD's
613 * first entry */ 615 * first entry */
614 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0); 616 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
615 dma_unmap_addr_set(out_meta, mapping, txcmd_phys); 617 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
616 dma_unmap_len_set(out_meta, len, len); 618 dma_unmap_len_set(out_meta, len, firstlen);
617 if (len) 619 if (secondlen > 0)
618 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, len, 0, 620 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0,
619 U32_PAD(len)); 621 U32_PAD(secondlen));
620 622
621 if (!ieee80211_has_morefrags(hdr->frame_control)) { 623 if (!ieee80211_has_morefrags(hdr->frame_control)) {
622 txq->need_update = 1; 624 txq->need_update = 1;
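
The il3945_tx_skb fix introduces firstlen/secondlen because the old code reused len for both DMA regions: by the time the unmap length was recorded, len had been overwritten with the payload size, so the command buffer would later be unmapped with the wrong length. A sketch of keeping the two lengths distinct (the padding rule is copied from the patch, the rest is illustrative):

    #include <stdio.h>

    struct unmap_info { unsigned int len; };

    /* Two DMA regions need two lengths; the old code reused one variable,
     * so the length recorded for unmapping region 1 was overwritten with
     * the size of region 2 before it was ever used. */
    static unsigned int map_frame(unsigned int hdr_len, unsigned int skb_len,
                                  struct unmap_info *meta)
    {
        unsigned int firstlen = (hdr_len + 3) & ~3u; /* padded cmd+header */
        unsigned int secondlen = skb_len - hdr_len;  /* payload, may be 0 */

        meta->len = firstlen;  /* what the later unmap must be told */
        return secondlen;      /* used only for the second mapping */
    }

    int main(void)
    {
        struct unmap_info meta;
        unsigned int second = map_frame(26, 120, &meta);

        printf("first=%u second=%u\n", meta.len, second); /* first=28 second=94 */
        return 0;
    }
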
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 20a6c5555873..b5c8b962ce12 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -157,6 +157,20 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
157 return -1; 157 return -1;
158 } 158 }
159 159
160 cmd_code = le16_to_cpu(host_cmd->command);
161 cmd_size = le16_to_cpu(host_cmd->size);
162
163 if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET &&
164 cmd_code != HostCmd_CMD_FUNC_SHUTDOWN &&
165 cmd_code != HostCmd_CMD_FUNC_INIT) {
166 dev_err(adapter->dev,
167 "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
168 cmd_code);
169 mwifiex_complete_cmd(adapter, cmd_node);
170 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
171 return -1;
172 }
173
160 /* Set command sequence number */ 174 /* Set command sequence number */
161 adapter->seq_num++; 175 adapter->seq_num++;
162 host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO 176 host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO
@@ -168,9 +182,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
168 adapter->curr_cmd = cmd_node; 182 adapter->curr_cmd = cmd_node;
169 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); 183 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
170 184
171 cmd_code = le16_to_cpu(host_cmd->command);
172 cmd_size = le16_to_cpu(host_cmd->size);
173
174 /* Adjust skb length */ 185 /* Adjust skb length */
175 if (cmd_node->cmd_skb->len > cmd_size) 186 if (cmd_node->cmd_skb->len > cmd_size)
176 /* 187 /*
@@ -484,8 +495,6 @@ int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
484 495
485 ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid, 496 ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid,
486 data_buf); 497 data_buf);
487 if (!ret)
488 ret = mwifiex_wait_queue_complete(adapter);
489 498
490 return ret; 499 return ret;
491} 500}
@@ -588,9 +597,10 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
588 if (cmd_no == HostCmd_CMD_802_11_SCAN) { 597 if (cmd_no == HostCmd_CMD_802_11_SCAN) {
589 mwifiex_queue_scan_cmd(priv, cmd_node); 598 mwifiex_queue_scan_cmd(priv, cmd_node);
590 } else { 599 } else {
591 adapter->cmd_queued = cmd_node;
592 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); 600 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
593 queue_work(adapter->workqueue, &adapter->main_work); 601 queue_work(adapter->workqueue, &adapter->main_work);
602 if (cmd_node->wait_q_enabled)
603 ret = mwifiex_wait_queue_complete(adapter, cmd_node);
594 } 604 }
595 605
596 return ret; 606 return ret;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index e38aa9b3663d..0ff4c37ab42a 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -709,6 +709,14 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
709 return ret; 709 return ret;
710 } 710 }
711 711
712 /* cancel current command */
713 if (adapter->curr_cmd) {
714 dev_warn(adapter->dev, "curr_cmd is still in processing\n");
715 del_timer(&adapter->cmd_timer);
716 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
717 adapter->curr_cmd = NULL;
718 }
719
712 /* shut down mwifiex */ 720 /* shut down mwifiex */
713 dev_dbg(adapter->dev, "info: shutdown mwifiex...\n"); 721 dev_dbg(adapter->dev, "info: shutdown mwifiex...\n");
714 722
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 246aa62a4817..2fe0ceba4400 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1117,10 +1117,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
1117 adhoc_join->bss_descriptor.bssid, 1117 adhoc_join->bss_descriptor.bssid,
1118 adhoc_join->bss_descriptor.ssid); 1118 adhoc_join->bss_descriptor.ssid);
1119 1119
1120 for (i = 0; bss_desc->supported_rates[i] && 1120 for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
1121 i < MWIFIEX_SUPPORTED_RATES; 1121 bss_desc->supported_rates[i]; i++)
1122 i++) 1122 ;
1123 ;
1124 rates_size = i; 1123 rates_size = i;
1125 1124
1126 /* Copy Data Rates from the Rates recorded in scan response */ 1125 /* Copy Data Rates from the Rates recorded in scan response */
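
The join.c loop fix is pure evaluation order: with the dereference first, the last pass read supported_rates[MWIFIEX_SUPPORTED_RATES] before the index test could stop it. Checking the bound first, as below, keeps the access in range (array size and contents are made up):

    #include <stdio.h>

    #define MAX_RATES 14   /* stand-in for MWIFIEX_SUPPORTED_RATES */

    /* The bound must be tested before the element: with the order reversed,
     * a full table makes the final iteration read rates[MAX_RATES]. */
    static int count_rates(const unsigned char rates[MAX_RATES])
    {
        int i;

        for (i = 0; i < MAX_RATES && rates[i]; i++)
            ;
        return i;
    }

    int main(void)
    {
        unsigned char rates[MAX_RATES] = { 2, 4, 11, 22 };

        printf("rates_size=%d\n", count_rates(rates)); /* 4 */
        return 0;
    }
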
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 553adfb0aa81..7035ade9af74 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -723,7 +723,6 @@ struct mwifiex_adapter {
723 u16 cmd_wait_q_required; 723 u16 cmd_wait_q_required;
724 struct mwifiex_wait_queue cmd_wait_q; 724 struct mwifiex_wait_queue cmd_wait_q;
725 u8 scan_wait_q_woken; 725 u8 scan_wait_q_woken;
726 struct cmd_ctrl_node *cmd_queued;
727 spinlock_t queue_lock; /* lock for tx queues */ 726 spinlock_t queue_lock; /* lock for tx queues */
728 struct completion fw_load; 727 struct completion fw_load;
729 u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; 728 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
@@ -1018,7 +1017,8 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
1018 struct mwifiex_multicast_list *mcast_list); 1017 struct mwifiex_multicast_list *mcast_list);
1019int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist, 1018int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
1020 struct net_device *dev); 1019 struct net_device *dev);
1021int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter); 1020int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
1021 struct cmd_ctrl_node *cmd_queued);
1022int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, 1022int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
1023 struct cfg80211_ssid *req_ssid); 1023 struct cfg80211_ssid *req_ssid);
1024int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type); 1024int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index bb60c2754a97..d215b4d3c51b 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1388,10 +1388,13 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
1388 list_del(&cmd_node->list); 1388 list_del(&cmd_node->list);
1389 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, 1389 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1390 flags); 1390 flags);
1391 adapter->cmd_queued = cmd_node;
1392 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, 1391 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
1393 true); 1392 true);
1394 queue_work(adapter->workqueue, &adapter->main_work); 1393 queue_work(adapter->workqueue, &adapter->main_work);
1394
1395 /* Perform internal scan synchronously */
1396 if (!priv->scan_request)
1397 mwifiex_wait_queue_complete(adapter, cmd_node);
1395 } else { 1398 } else {
1396 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, 1399 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1397 flags); 1400 flags);
@@ -1946,9 +1949,6 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
1946 /* Normal scan */ 1949 /* Normal scan */
1947 ret = mwifiex_scan_networks(priv, NULL); 1950 ret = mwifiex_scan_networks(priv, NULL);
1948 1951
1949 if (!ret)
1950 ret = mwifiex_wait_queue_complete(priv->adapter);
1951
1952 up(&priv->async_sem); 1952 up(&priv->async_sem);
1953 1953
1954 return ret; 1954 return ret;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 9f33c92c90f5..13100f8de3db 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -54,16 +54,10 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
54 * This function waits on a cmd wait queue. It also cancels the pending 54 * This function waits on a cmd wait queue. It also cancels the pending
55 * request after waking up, in case of errors. 55 * request after waking up, in case of errors.
56 */ 56 */
57int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter) 57int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
58 struct cmd_ctrl_node *cmd_queued)
58{ 59{
59 int status; 60 int status;
60 struct cmd_ctrl_node *cmd_queued;
61
62 if (!adapter->cmd_queued)
63 return 0;
64
65 cmd_queued = adapter->cmd_queued;
66 adapter->cmd_queued = NULL;
67 61
68 dev_dbg(adapter->dev, "cmd pending\n"); 62 dev_dbg(adapter->dev, "cmd pending\n");
69 atomic_inc(&adapter->cmd_pending); 63 atomic_inc(&adapter->cmd_pending);
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 44d6ead43341..2bf4efa33186 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -55,10 +55,10 @@ config RT61PCI
55 55
56config RT2800PCI 56config RT2800PCI
57 tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" 57 tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
58 depends on PCI || RALINK_RT288X || RALINK_RT305X 58 depends on PCI || SOC_RT288X || SOC_RT305X
59 select RT2800_LIB 59 select RT2800_LIB
60 select RT2X00_LIB_PCI if PCI 60 select RT2X00_LIB_PCI if PCI
61 select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X 61 select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
62 select RT2X00_LIB_FIRMWARE 62 select RT2X00_LIB_FIRMWARE
63 select RT2X00_LIB_CRYPTO 63 select RT2X00_LIB_CRYPTO
64 select CRC_CCITT 64 select CRC_CCITT
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 48a01aa21f1c..ded73da4de0b 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -89,7 +89,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
89 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 89 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
90} 90}
91 91
92#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 92#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
93static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) 93static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
94{ 94{
95 void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE); 95 void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
@@ -107,7 +107,7 @@ static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
107{ 107{
108 return -ENOMEM; 108 return -ENOMEM;
109} 109}
110#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ 110#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
111 111
112#ifdef CONFIG_PCI 112#ifdef CONFIG_PCI
113static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) 113static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
@@ -1177,7 +1177,7 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
1177#endif /* CONFIG_PCI */ 1177#endif /* CONFIG_PCI */
1178MODULE_LICENSE("GPL"); 1178MODULE_LICENSE("GPL");
1179 1179
1180#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 1180#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
1181static int rt2800soc_probe(struct platform_device *pdev) 1181static int rt2800soc_probe(struct platform_device *pdev)
1182{ 1182{
1183 return rt2x00soc_probe(pdev, &rt2800pci_ops); 1183 return rt2x00soc_probe(pdev, &rt2800pci_ops);
@@ -1194,7 +1194,7 @@ static struct platform_driver rt2800soc_driver = {
1194 .suspend = rt2x00soc_suspend, 1194 .suspend = rt2x00soc_suspend,
1195 .resume = rt2x00soc_resume, 1195 .resume = rt2x00soc_resume,
1196}; 1196};
1197#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ 1197#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
1198 1198
1199#ifdef CONFIG_PCI 1199#ifdef CONFIG_PCI
1200static int rt2800pci_probe(struct pci_dev *pci_dev, 1200static int rt2800pci_probe(struct pci_dev *pci_dev,
@@ -1217,7 +1217,7 @@ static int __init rt2800pci_init(void)
1217{ 1217{
1218 int ret = 0; 1218 int ret = 0;
1219 1219
1220#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 1220#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
1221 ret = platform_driver_register(&rt2800soc_driver); 1221 ret = platform_driver_register(&rt2800soc_driver);
1222 if (ret) 1222 if (ret)
1223 return ret; 1223 return ret;
@@ -1225,7 +1225,7 @@ static int __init rt2800pci_init(void)
1225#ifdef CONFIG_PCI 1225#ifdef CONFIG_PCI
1226 ret = pci_register_driver(&rt2800pci_driver); 1226 ret = pci_register_driver(&rt2800pci_driver);
1227 if (ret) { 1227 if (ret) {
1228#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 1228#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
1229 platform_driver_unregister(&rt2800soc_driver); 1229 platform_driver_unregister(&rt2800soc_driver);
1230#endif 1230#endif
1231 return ret; 1231 return ret;
@@ -1240,7 +1240,7 @@ static void __exit rt2800pci_exit(void)
1240#ifdef CONFIG_PCI 1240#ifdef CONFIG_PCI
1241 pci_unregister_driver(&rt2800pci_driver); 1241 pci_unregister_driver(&rt2800pci_driver);
1242#endif 1242#endif
1243#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 1243#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
1244 platform_driver_unregister(&rt2800soc_driver); 1244 platform_driver_unregister(&rt2800soc_driver);
1245#endif 1245#endif
1246} 1246}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index b1ccff474c79..c08d0f4c5f3d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1377,74 +1377,57 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw)
1377 1377
1378void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) 1378void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1379{ 1379{
1380 /* dummy routine needed for callback from rtl_op_configure_filter() */
1381}
1382
1383/*========================================================================== */
1384
1385static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
1386 enum nl80211_iftype type)
1387{
1388 struct rtl_priv *rtlpriv = rtl_priv(hw); 1380 struct rtl_priv *rtlpriv = rtl_priv(hw);
1389 u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
1390 struct rtl_hal *rtlhal = rtl_hal(rtlpriv); 1381 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1391 struct rtl_phy *rtlphy = &(rtlpriv->phy); 1382 u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
1392 u8 filterout_non_associated_bssid = false;
1393 1383
1394 switch (type) { 1384 if (rtlpriv->psc.rfpwr_state != ERFON)
1395 case NL80211_IFTYPE_ADHOC: 1385 return;
1396 case NL80211_IFTYPE_STATION: 1386
1397 filterout_non_associated_bssid = true; 1387 if (check_bssid) {
1398 break; 1388 u8 tmp;
1399 case NL80211_IFTYPE_UNSPECIFIED:
1400 case NL80211_IFTYPE_AP:
1401 default:
1402 break;
1403 }
1404 if (filterout_non_associated_bssid) {
1405 if (IS_NORMAL_CHIP(rtlhal->version)) { 1389 if (IS_NORMAL_CHIP(rtlhal->version)) {
1406 switch (rtlphy->current_io_type) { 1390 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1407 case IO_CMD_RESUME_DM_BY_SCAN: 1391 tmp = BIT(4);
1408 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1409 rtlpriv->cfg->ops->set_hw_reg(hw,
1410 HW_VAR_RCR, (u8 *)(&reg_rcr));
1411 /* enable update TSF */
1412 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
1413 break;
1414 case IO_CMD_PAUSE_DM_BY_SCAN:
1415 reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1416 rtlpriv->cfg->ops->set_hw_reg(hw,
1417 HW_VAR_RCR, (u8 *)(&reg_rcr));
1418 /* disable update TSF */
1419 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
1420 break;
1421 }
1422 } else { 1392 } else {
1423 reg_rcr |= (RCR_CBSSID); 1393 reg_rcr |= RCR_CBSSID;
1424 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 1394 tmp = BIT(4) | BIT(5);
1425 (u8 *)(&reg_rcr));
1426 _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
1427 } 1395 }
1428 } else if (filterout_non_associated_bssid == false) { 1396 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1397 (u8 *) (&reg_rcr));
1398 _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp);
1399 } else {
1400 u8 tmp;
1429 if (IS_NORMAL_CHIP(rtlhal->version)) { 1401 if (IS_NORMAL_CHIP(rtlhal->version)) {
1430 reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); 1402 reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1431 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 1403 tmp = BIT(4);
1432 (u8 *)(&reg_rcr));
1433 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
1434 } else { 1404 } else {
1435 reg_rcr &= (~RCR_CBSSID); 1405 reg_rcr &= ~RCR_CBSSID;
1436 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 1406 tmp = BIT(4) | BIT(5);
1437 (u8 *)(&reg_rcr));
1438 _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
1439 } 1407 }
1408 reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
1409 rtlpriv->cfg->ops->set_hw_reg(hw,
1410 HW_VAR_RCR, (u8 *) (&reg_rcr));
1411 _rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0);
1440 } 1412 }
1441} 1413}
1442 1414
1415/*========================================================================== */
1416
1443int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) 1417int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
1444{ 1418{
1419 struct rtl_priv *rtlpriv = rtl_priv(hw);
1420
1445 if (_rtl92cu_set_media_status(hw, type)) 1421 if (_rtl92cu_set_media_status(hw, type))
1446 return -EOPNOTSUPP; 1422 return -EOPNOTSUPP;
1447 _rtl92cu_set_check_bssid(hw, type); 1423
1424 if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
1425 if (type != NL80211_IFTYPE_AP)
1426 rtl92cu_set_check_bssid(hw, true);
1427 } else {
1428 rtl92cu_set_check_bssid(hw, false);
1429 }
1430
1448 return 0; 1431 return 0;
1449} 1432}
1450 1433
@@ -2058,8 +2041,6 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
2058 (shortgi_rate << 4) | (shortgi_rate); 2041 (shortgi_rate << 4) | (shortgi_rate);
2059 } 2042 }
2060 rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); 2043 rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
2061 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
2062 rtl_read_dword(rtlpriv, REG_ARFR0));
2063} 2044}
2064 2045
2065void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) 2046void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 156b52732f3d..5847d6d0881e 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -851,6 +851,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
851 if (unlikely(!_urb)) { 851 if (unlikely(!_urb)) {
852 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 852 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
853 "Can't allocate urb. Drop skb!\n"); 853 "Can't allocate urb. Drop skb!\n");
854 kfree_skb(skb);
854 return; 855 return;
855 } 856 }
856 _rtl_submit_tx_urb(hw, _urb); 857 _rtl_submit_tx_urb(hw, _urb);
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index ab886b7ee327..b41ac7756a4b 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -100,6 +100,27 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
100 return min((size_t)(image - rom), size); 100 return min((size_t)(image - rom), size);
101} 101}
102 102
103static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size)
104{
105 struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
106 loff_t start;
107
108 /* assign the ROM an address if it doesn't have one */
109 if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE))
110 return 0;
111 start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
112 *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
113
114 if (*size == 0)
115 return 0;
116
117 /* Enable ROM space decodes */
118 if (pci_enable_rom(pdev))
119 return 0;
120
121 return start;
122}
123
103/** 124/**
104 * pci_map_rom - map a PCI ROM to kernel space 125 * pci_map_rom - map a PCI ROM to kernel space
105 * @pdev: pointer to pci device struct 126 * @pdev: pointer to pci device struct
@@ -114,21 +135,15 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
114void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) 135void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
115{ 136{
116 struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; 137 struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
117 loff_t start; 138 loff_t start = 0;
118 void __iomem *rom; 139 void __iomem *rom;
119 140
120 /* 141 /*
121 * Some devices may provide ROMs via a source other than the BAR
122 */
123 if (pdev->rom && pdev->romlen) {
124 *size = pdev->romlen;
125 return phys_to_virt(pdev->rom);
126 /*
127 * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy 142 * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
128 * memory map if the VGA enable bit of the Bridge Control register is 143 * memory map if the VGA enable bit of the Bridge Control register is
129 * set for embedded VGA. 144 * set for embedded VGA.
130 */ 145 */
131 } else if (res->flags & IORESOURCE_ROM_SHADOW) { 146 if (res->flags & IORESOURCE_ROM_SHADOW) {
132 /* primary video rom always starts here */ 147 /* primary video rom always starts here */
133 start = (loff_t)0xC0000; 148 start = (loff_t)0xC0000;
134 *size = 0x20000; /* cover C000:0 through E000:0 */ 149 *size = 0x20000; /* cover C000:0 through E000:0 */
@@ -139,21 +154,21 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
139 return (void __iomem *)(unsigned long) 154 return (void __iomem *)(unsigned long)
140 pci_resource_start(pdev, PCI_ROM_RESOURCE); 155 pci_resource_start(pdev, PCI_ROM_RESOURCE);
141 } else { 156 } else {
142 /* assign the ROM an address if it doesn't have one */ 157 start = pci_find_rom(pdev, size);
143 if (res->parent == NULL &&
144 pci_assign_resource(pdev,PCI_ROM_RESOURCE))
145 return NULL;
146 start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
147 *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
148 if (*size == 0)
149 return NULL;
150
151 /* Enable ROM space decodes */
152 if (pci_enable_rom(pdev))
153 return NULL;
154 } 158 }
155 } 159 }
156 160
161 /*
162 * Some devices may provide ROMs via a source other than the BAR
163 */
164 if (!start && pdev->rom && pdev->romlen) {
165 *size = pdev->romlen;
166 return phys_to_virt(pdev->rom);
167 }
168
169 if (!start)
170 return NULL;
171
157 rom = ioremap(start, *size); 172 rom = ioremap(start, *size);
158 if (!rom) { 173 if (!rom) {
159 /* restore enable if ioremap fails */ 174 /* restore enable if ioremap fails */
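
The pci_map_rom() rework extracts the BAR-based lookup into pci_find_rom() and demotes the device-provided ROM copy (pdev->rom/romlen) to a fallback consulted only when the BAR path finds nothing. A sketch of that lookup ordering (addresses and helpers are illustrative):

    #include <stdio.h>

    /* Stand-in for pci_find_rom(): resolve the ROM BAR, or 0 on failure. */
    static unsigned long find_rom_bar(int bar_ok)
    {
        return bar_ok ? 0xC0000UL : 0;
    }

    /* The BAR is consulted first; the platform-provided copy is only a
     * fallback, and 0 still means "no ROM at all". */
    static unsigned long map_rom(int bar_ok, unsigned long platform_rom)
    {
        unsigned long start = find_rom_bar(bar_ok);

        if (!start && platform_rom)
            start = platform_rom;
        return start;
    }

    int main(void)
    {
        printf("%lx %lx %lx\n",
               map_rom(1, 0xE0000UL),   /* BAR wins:  c0000 */
               map_rom(0, 0xE0000UL),   /* fallback:  e0000 */
               map_rom(0, 0));          /* nothing:   0 */
        return 0;
    }
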
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index c689c04a4f52..2d2f0a43d36b 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -620,7 +620,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
620 620
621 /* special soc specific control */ 621 /* special soc specific control */
622 if (ctrl->mpp_get || ctrl->mpp_set) { 622 if (ctrl->mpp_get || ctrl->mpp_set) {
623 if (!ctrl->name || !ctrl->mpp_set || !ctrl->mpp_set) { 623 if (!ctrl->name || !ctrl->mpp_get || !ctrl->mpp_set) {
624 dev_err(&pdev->dev, "wrong soc control info\n"); 624 dev_err(&pdev->dev, "wrong soc control info\n");
625 return -EINVAL; 625 return -EINVAL;
626 } 626 }
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index ac8d382a79bb..d611ecfcbf70 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -622,7 +622,7 @@ static const struct file_operations pinconf_dbg_pinname_fops = {
622static int pinconf_dbg_state_print(struct seq_file *s, void *d) 622static int pinconf_dbg_state_print(struct seq_file *s, void *d)
623{ 623{
624 if (strlen(dbg_state_name)) 624 if (strlen(dbg_state_name))
625 seq_printf(s, "%s\n", dbg_pinname); 625 seq_printf(s, "%s\n", dbg_state_name);
626 else 626 else
627 seq_printf(s, "No pin state set\n"); 627 seq_printf(s, "No pin state set\n");
628 return 0; 628 return 0;
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
index e3ed8cb072a5..bfda73d64eed 100644
--- a/drivers/pinctrl/pinconf.h
+++ b/drivers/pinctrl/pinconf.h
@@ -90,7 +90,7 @@ static inline void pinconf_init_device_debugfs(struct dentry *devroot,
90 * pin config. 90 * pin config.
91 */ 91 */
92 92
93#ifdef CONFIG_GENERIC_PINCONF 93#if defined(CONFIG_GENERIC_PINCONF) && defined(CONFIG_DEBUG_FS)
94 94
95void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev, 95void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
96 struct seq_file *s, unsigned pin); 96 struct seq_file *s, unsigned pin);
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c
index caecdd373061..c542a97c82f3 100644
--- a/drivers/pinctrl/pinctrl-abx500.c
+++ b/drivers/pinctrl/pinctrl-abx500.c
@@ -422,7 +422,7 @@ static u8 abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
422 } 422 }
423 423
424 /* check if pin use AlternateFunction register */ 424 /* check if pin use AlternateFunction register */
425 if ((af.alt_bit1 == UNUSED) && (af.alt_bit1 == UNUSED)) 425 if ((af.alt_bit1 == UNUSED) && (af.alt_bit2 == UNUSED))
426 return mode; 426 return mode;
427 /* 427 /*
428 * if pin GPIOSEL bit is set and pin supports alternate function, 428 * if pin GPIOSEL bit is set and pin supports alternate function,
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 75933a6aa828..efb7f10e902a 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1277,21 +1277,80 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
1277} 1277}
1278 1278
1279#ifdef CONFIG_PM 1279#ifdef CONFIG_PM
1280
1281static u32 wakeups[MAX_GPIO_BANKS];
1282static u32 backups[MAX_GPIO_BANKS];
1283
1280static int gpio_irq_set_wake(struct irq_data *d, unsigned state) 1284static int gpio_irq_set_wake(struct irq_data *d, unsigned state)
1281{ 1285{
1282 struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); 1286 struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
1283 unsigned bank = at91_gpio->pioc_idx; 1287 unsigned bank = at91_gpio->pioc_idx;
1288 unsigned mask = 1 << d->hwirq;
1284 1289
1285 if (unlikely(bank >= MAX_GPIO_BANKS)) 1290 if (unlikely(bank >= MAX_GPIO_BANKS))
1286 return -EINVAL; 1291 return -EINVAL;
1287 1292
1293 if (state)
1294 wakeups[bank] |= mask;
1295 else
1296 wakeups[bank] &= ~mask;
1297
1288 irq_set_irq_wake(at91_gpio->pioc_virq, state); 1298 irq_set_irq_wake(at91_gpio->pioc_virq, state);
1289 1299
1290 return 0; 1300 return 0;
1291} 1301}
1302
1303void at91_pinctrl_gpio_suspend(void)
1304{
1305 int i;
1306
1307 for (i = 0; i < gpio_banks; i++) {
1308 void __iomem *pio;
1309
1310 if (!gpio_chips[i])
1311 continue;
1312
1313 pio = gpio_chips[i]->regbase;
1314
1315 backups[i] = __raw_readl(pio + PIO_IMR);
1316 __raw_writel(backups[i], pio + PIO_IDR);
1317 __raw_writel(wakeups[i], pio + PIO_IER);
1318
1319 if (!wakeups[i]) {
1320 clk_unprepare(gpio_chips[i]->clock);
1321 clk_disable(gpio_chips[i]->clock);
1322 } else {
1323 printk(KERN_DEBUG "GPIO-%c may wake for %08x\n",
1324 'A'+i, wakeups[i]);
1325 }
1326 }
1327}
1328
1329void at91_pinctrl_gpio_resume(void)
1330{
1331 int i;
1332
1333 for (i = 0; i < gpio_banks; i++) {
1334 void __iomem *pio;
1335
1336 if (!gpio_chips[i])
1337 continue;
1338
1339 pio = gpio_chips[i]->regbase;
1340
1341 if (!wakeups[i]) {
1342 if (clk_prepare(gpio_chips[i]->clock) == 0)
1343 clk_enable(gpio_chips[i]->clock);
1344 }
1345
1346 __raw_writel(wakeups[i], pio + PIO_IDR);
1347 __raw_writel(backups[i], pio + PIO_IER);
1348 }
1349}
1350
1292#else 1351#else
1293#define gpio_irq_set_wake NULL 1352#define gpio_irq_set_wake NULL
1294#endif 1353#endif /* CONFIG_PM */
1295 1354
1296static struct irq_chip gpio_irqchip = { 1355static struct irq_chip gpio_irqchip = {
1297 .name = "GPIO", 1356 .name = "GPIO",
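
The new CONFIG_PM block is a conventional wakeup-mask swap: remember which GPIO lines were armed through the set_wake hook, then on suspend replace each bank's interrupt mask with exactly that set, and restore the saved mask on resume. A compilable analogue, with plain arrays standing in for the memory-mapped PIO_IMR/IER/IDR registers and the per-bank clock gating omitted:

#include <stdint.h>
#include <stdio.h>

#define NBANKS 4

static uint32_t imr[NBANKS];		/* stands in for each bank's PIO_IMR */
static uint32_t wakeups[NBANKS];	/* bits armed via the set_wake hook */
static uint32_t backups[NBANKS];	/* masks saved across suspend */

static void bank_suspend(int i)
{
	backups[i] = imr[i];	/* read PIO_IMR */
	imr[i] = wakeups[i];	/* IDR everything, then IER the wake sources */
}

static void bank_resume(int i)
{
	imr[i] = backups[i];	/* IDR the wake sources, IER the saved mask */
}

int main(void)
{
	imr[0] = 0x0f;
	wakeups[0] = 0x04;	/* only one line may wake the system */
	bank_suspend(0);
	printf("suspended mask %#x\n", (unsigned)imr[0]);	/* 0x4 */
	bank_resume(0);
	printf("resumed mask   %#x\n", (unsigned)imr[0]);	/* 0xf */
	return 0;
}
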
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 1a00658b3ea0..bd83c8b01cd1 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -194,6 +194,11 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
194 } 194 }
195 195
196 if (!gpio_range) { 196 if (!gpio_range) {
197 /*
198 * A pin should not be freed more times than allocated.
199 */
200 if (WARN_ON(!desc->mux_usecount))
201 return NULL;
197 desc->mux_usecount--; 202 desc->mux_usecount--;
198 if (desc->mux_usecount) 203 if (desc->mux_usecount)
199 return NULL; 204 return NULL;
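
The inserted WARN_ON turns a silent refcount underflow into a loud one: mux_usecount is unsigned, so decrementing it past zero on an unbalanced free would wrap around and leave the pin looking permanently busy. A sketch of the guarded release (the function name here is invented for illustration):

#include <stdio.h>

static unsigned int mux_usecount;

/* Decrement with an underflow guard: freeing a pin more often than it
 * was claimed now warns and bails out instead of wrapping the counter. */
static int pin_free_once(void)
{
	if (!mux_usecount) {		/* the new WARN_ON(...) above */
		fprintf(stderr, "unbalanced pin free\n");
		return -1;
	}
	if (--mux_usecount)		/* other users remain */
		return 0;
	puts("usecount hit zero: actually release the pin");
	return 0;
}

int main(void)
{
	mux_usecount = 1;
	pin_free_once();			/* releases */
	return pin_free_once() == -1 ? 0 : 1;	/* second free is caught */
}
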
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 434ebc3a99dc..0a9f27e094ea 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -44,6 +44,7 @@ static DECLARE_COMPLETION(at91_rtc_updated);
44static unsigned int at91_alarm_year = AT91_RTC_EPOCH; 44static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
45static void __iomem *at91_rtc_regs; 45static void __iomem *at91_rtc_regs;
46static int irq; 46static int irq;
47static u32 at91_rtc_imr;
47 48
48/* 49/*
49 * Decode time/date into rtc_time structure 50 * Decode time/date into rtc_time structure
@@ -108,9 +109,11 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
108 cr = at91_rtc_read(AT91_RTC_CR); 109 cr = at91_rtc_read(AT91_RTC_CR);
109 at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); 110 at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
110 111
112 at91_rtc_imr |= AT91_RTC_ACKUPD;
111 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); 113 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
112 wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ 114 wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
113 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); 115 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
116 at91_rtc_imr &= ~AT91_RTC_ACKUPD;
114 117
115 at91_rtc_write(AT91_RTC_TIMR, 118 at91_rtc_write(AT91_RTC_TIMR,
116 bin2bcd(tm->tm_sec) << 0 119 bin2bcd(tm->tm_sec) << 0
@@ -142,7 +145,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
142 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); 145 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
143 tm->tm_year = at91_alarm_year - 1900; 146 tm->tm_year = at91_alarm_year - 1900;
144 147
145 alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM) 148 alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM)
146 ? 1 : 0; 149 ? 1 : 0;
147 150
148 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, 151 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -168,6 +171,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
168 tm.tm_sec = alrm->time.tm_sec; 171 tm.tm_sec = alrm->time.tm_sec;
169 172
170 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); 173 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
174 at91_rtc_imr &= ~AT91_RTC_ALARM;
171 at91_rtc_write(AT91_RTC_TIMALR, 175 at91_rtc_write(AT91_RTC_TIMALR,
172 bin2bcd(tm.tm_sec) << 0 176 bin2bcd(tm.tm_sec) << 0
173 | bin2bcd(tm.tm_min) << 8 177 | bin2bcd(tm.tm_min) << 8
@@ -180,6 +184,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
180 184
181 if (alrm->enabled) { 185 if (alrm->enabled) {
182 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); 186 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
187 at91_rtc_imr |= AT91_RTC_ALARM;
183 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); 188 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
184 } 189 }
185 190
@@ -196,9 +201,12 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
196 201
197 if (enabled) { 202 if (enabled) {
198 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); 203 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
204 at91_rtc_imr |= AT91_RTC_ALARM;
199 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); 205 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
200 } else 206 } else {
201 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); 207 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
208 at91_rtc_imr &= ~AT91_RTC_ALARM;
209 }
202 210
203 return 0; 211 return 0;
204} 212}
@@ -207,12 +215,10 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
207 */ 215 */
208static int at91_rtc_proc(struct device *dev, struct seq_file *seq) 216static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
209{ 217{
210 unsigned long imr = at91_rtc_read(AT91_RTC_IMR);
211
212 seq_printf(seq, "update_IRQ\t: %s\n", 218 seq_printf(seq, "update_IRQ\t: %s\n",
213 (imr & AT91_RTC_ACKUPD) ? "yes" : "no"); 219 (at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no");
214 seq_printf(seq, "periodic_IRQ\t: %s\n", 220 seq_printf(seq, "periodic_IRQ\t: %s\n",
215 (imr & AT91_RTC_SECEV) ? "yes" : "no"); 221 (at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no");
216 222
217 return 0; 223 return 0;
218} 224}
@@ -227,7 +233,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
227 unsigned int rtsr; 233 unsigned int rtsr;
228 unsigned long events = 0; 234 unsigned long events = 0;
229 235
230 rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR); 236 rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr;
231 if (rtsr) { /* this interrupt is shared! Is it ours? */ 237 if (rtsr) { /* this interrupt is shared! Is it ours? */
232 if (rtsr & AT91_RTC_ALARM) 238 if (rtsr & AT91_RTC_ALARM)
233 events |= (RTC_AF | RTC_IRQF); 239 events |= (RTC_AF | RTC_IRQF);
@@ -291,6 +297,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
291 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | 297 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
292 AT91_RTC_SECEV | AT91_RTC_TIMEV | 298 AT91_RTC_SECEV | AT91_RTC_TIMEV |
293 AT91_RTC_CALEV); 299 AT91_RTC_CALEV);
300 at91_rtc_imr = 0;
294 301
295 ret = request_irq(irq, at91_rtc_interrupt, 302 ret = request_irq(irq, at91_rtc_interrupt,
296 IRQF_SHARED, 303 IRQF_SHARED,
@@ -329,6 +336,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
329 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | 336 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
330 AT91_RTC_SECEV | AT91_RTC_TIMEV | 337 AT91_RTC_SECEV | AT91_RTC_TIMEV |
331 AT91_RTC_CALEV); 338 AT91_RTC_CALEV);
339 at91_rtc_imr = 0;
332 free_irq(irq, pdev); 340 free_irq(irq, pdev);
333 341
334 rtc_device_unregister(rtc); 342 rtc_device_unregister(rtc);
@@ -341,31 +349,35 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
341 349
342/* AT91RM9200 RTC Power management control */ 350/* AT91RM9200 RTC Power management control */
343 351
344static u32 at91_rtc_imr; 352static u32 at91_rtc_bkpimr;
353
345 354
346static int at91_rtc_suspend(struct device *dev) 355static int at91_rtc_suspend(struct device *dev)
347{ 356{
348 /* this IRQ is shared with DBGU and other hardware which isn't 357 /* this IRQ is shared with DBGU and other hardware which isn't
349 * necessarily doing PM like we are... 358 * necessarily doing PM like we are...
350 */ 359 */
351 at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR) 360 at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV);
352 & (AT91_RTC_ALARM|AT91_RTC_SECEV); 361 if (at91_rtc_bkpimr) {
353 if (at91_rtc_imr) { 362 if (device_may_wakeup(dev)) {
354 if (device_may_wakeup(dev))
355 enable_irq_wake(irq); 363 enable_irq_wake(irq);
356 else 364 } else {
357 at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr); 365 at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr);
358 } 366 at91_rtc_imr &= ~at91_rtc_bkpimr;
367 }
368}
359 return 0; 369 return 0;
360} 370}
361 371
362static int at91_rtc_resume(struct device *dev) 372static int at91_rtc_resume(struct device *dev)
363{ 373{
364 if (at91_rtc_imr) { 374 if (at91_rtc_bkpimr) {
365 if (device_may_wakeup(dev)) 375 if (device_may_wakeup(dev)) {
366 disable_irq_wake(irq); 376 disable_irq_wake(irq);
367 else 377 } else {
368 at91_rtc_write(AT91_RTC_IER, at91_rtc_imr); 378 at91_rtc_imr |= at91_rtc_bkpimr;
379 at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr);
380 }
369 } 381 }
370 return 0; 382 return 0;
371} 383}
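
All of the at91_rtc_imr manipulation above implements one idea: keep a software shadow of the interrupt-mask register and update it around every IER/IDR write, so the driver never has to read the mask back from hardware (the header hunk below removes the AT91_RTC_IMR define outright, presumably because the register cannot be read back reliably on every chip revision). The pattern in miniature, with empty stubs in place of the register writes:

#include <stdint.h>
#include <stdio.h>

static uint32_t at91_rtc_imr;	/* software copy of the enabled-IRQ mask */

static void write_ier(uint32_t m) { (void)m; /* would hit AT91_RTC_IER */ }
static void write_idr(uint32_t m) { (void)m; /* would hit AT91_RTC_IDR */ }

static void rtc_irq_enable(uint32_t mask)
{
	at91_rtc_imr |= mask;	/* mirror first, in the order the diff uses */
	write_ier(mask);
}

static void rtc_irq_disable(uint32_t mask)
{
	write_idr(mask);
	at91_rtc_imr &= ~mask;	/* clear the mirror after the disable */
}

int main(void)
{
	rtc_irq_enable(1u << 1);	/* e.g. AT91_RTC_ALARM */
	printf("alarm enabled: %u\n", (unsigned)((at91_rtc_imr >> 1) & 1u));
	rtc_irq_disable(1u << 1);
	return 0;
}
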
diff --git a/drivers/rtc/rtc-at91rm9200.h b/drivers/rtc/rtc-at91rm9200.h
index da1945e5f714..5f940b6844cb 100644
--- a/drivers/rtc/rtc-at91rm9200.h
+++ b/drivers/rtc/rtc-at91rm9200.h
@@ -64,7 +64,6 @@
64#define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */ 64#define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */
65#define AT91_RTC_IER 0x20 /* Interrupt Enable Register */ 65#define AT91_RTC_IER 0x20 /* Interrupt Enable Register */
66#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */ 66#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
67#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
68 67
69#define AT91_RTC_VER 0x2c /* Valid Entry Register */ 68#define AT91_RTC_VER 0x2c /* Valid Entry Register */
70#define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */ 69#define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index 0dde688ca09b..969abbad7fe3 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -239,11 +239,9 @@ static int da9052_rtc_probe(struct platform_device *pdev)
239 239
240 rtc->da9052 = dev_get_drvdata(pdev->dev.parent); 240 rtc->da9052 = dev_get_drvdata(pdev->dev.parent);
241 platform_set_drvdata(pdev, rtc); 241 platform_set_drvdata(pdev, rtc);
242 rtc->irq = platform_get_irq_byname(pdev, "ALM"); 242 rtc->irq = DA9052_IRQ_ALARM;
243 ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL, 243 ret = da9052_request_irq(rtc->da9052, rtc->irq, "ALM",
244 da9052_rtc_irq, 244 da9052_rtc_irq, rtc);
245 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
246 "ALM", rtc);
247 if (ret != 0) { 245 if (ret != 0) {
248 rtc_err(rtc->da9052, "irq registration failed: %d\n", ret); 246 rtc_err(rtc->da9052, "irq registration failed: %d\n", ret);
249 return ret; 247 return ret;
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 9978ad4433cb..5ac9c935c151 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -135,6 +135,11 @@ static const struct block_device_operations scm_blk_devops = {
135 .release = scm_release, 135 .release = scm_release,
136}; 136};
137 137
138static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
139{
140 return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
141}
142
138static void scm_request_prepare(struct scm_request *scmrq) 143static void scm_request_prepare(struct scm_request *scmrq)
139{ 144{
140 struct scm_blk_dev *bdev = scmrq->bdev; 145 struct scm_blk_dev *bdev = scmrq->bdev;
@@ -195,14 +200,18 @@ void scm_request_requeue(struct scm_request *scmrq)
195 200
196 scm_release_cluster(scmrq); 201 scm_release_cluster(scmrq);
197 blk_requeue_request(bdev->rq, scmrq->request); 202 blk_requeue_request(bdev->rq, scmrq->request);
203 atomic_dec(&bdev->queued_reqs);
198 scm_request_done(scmrq); 204 scm_request_done(scmrq);
199 scm_ensure_queue_restart(bdev); 205 scm_ensure_queue_restart(bdev);
200} 206}
201 207
202void scm_request_finish(struct scm_request *scmrq) 208void scm_request_finish(struct scm_request *scmrq)
203{ 209{
210 struct scm_blk_dev *bdev = scmrq->bdev;
211
204 scm_release_cluster(scmrq); 212 scm_release_cluster(scmrq);
205 blk_end_request_all(scmrq->request, scmrq->error); 213 blk_end_request_all(scmrq->request, scmrq->error);
214 atomic_dec(&bdev->queued_reqs);
206 scm_request_done(scmrq); 215 scm_request_done(scmrq);
207} 216}
208 217
@@ -218,6 +227,10 @@ static void scm_blk_request(struct request_queue *rq)
218 if (req->cmd_type != REQ_TYPE_FS) 227 if (req->cmd_type != REQ_TYPE_FS)
219 continue; 228 continue;
220 229
230 if (!scm_permit_request(bdev, req)) {
231 scm_ensure_queue_restart(bdev);
232 return;
233 }
221 scmrq = scm_request_fetch(); 234 scmrq = scm_request_fetch();
222 if (!scmrq) { 235 if (!scmrq) {
223 SCM_LOG(5, "no request"); 236 SCM_LOG(5, "no request");
@@ -231,11 +244,13 @@ static void scm_blk_request(struct request_queue *rq)
231 return; 244 return;
232 } 245 }
233 if (scm_need_cluster_request(scmrq)) { 246 if (scm_need_cluster_request(scmrq)) {
247 atomic_inc(&bdev->queued_reqs);
234 blk_start_request(req); 248 blk_start_request(req);
235 scm_initiate_cluster_request(scmrq); 249 scm_initiate_cluster_request(scmrq);
236 return; 250 return;
237 } 251 }
238 scm_request_prepare(scmrq); 252 scm_request_prepare(scmrq);
253 atomic_inc(&bdev->queued_reqs);
239 blk_start_request(req); 254 blk_start_request(req);
240 255
241 ret = scm_start_aob(scmrq->aob); 256 ret = scm_start_aob(scmrq->aob);
@@ -244,7 +259,6 @@ static void scm_blk_request(struct request_queue *rq)
244 scm_request_requeue(scmrq); 259 scm_request_requeue(scmrq);
245 return; 260 return;
246 } 261 }
247 atomic_inc(&bdev->queued_reqs);
248 } 262 }
249} 263}
250 264
@@ -280,6 +294,38 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
280 tasklet_hi_schedule(&bdev->tasklet); 294 tasklet_hi_schedule(&bdev->tasklet);
281} 295}
282 296
297static void scm_blk_handle_error(struct scm_request *scmrq)
298{
299 struct scm_blk_dev *bdev = scmrq->bdev;
300 unsigned long flags;
301
302 if (scmrq->error != -EIO)
303 goto restart;
304
305 /* For -EIO the response block is valid. */
306 switch (scmrq->aob->response.eqc) {
307 case EQC_WR_PROHIBIT:
308 spin_lock_irqsave(&bdev->lock, flags);
309 if (bdev->state != SCM_WR_PROHIBIT)
310 pr_info("%lu: Write access to the SCM increment is suspended\n",
311 (unsigned long) bdev->scmdev->address);
312 bdev->state = SCM_WR_PROHIBIT;
313 spin_unlock_irqrestore(&bdev->lock, flags);
314 goto requeue;
315 default:
316 break;
317 }
318
319restart:
320 if (!scm_start_aob(scmrq->aob))
321 return;
322
323requeue:
324 spin_lock_irqsave(&bdev->rq_lock, flags);
325 scm_request_requeue(scmrq);
326 spin_unlock_irqrestore(&bdev->rq_lock, flags);
327}
328
283static void scm_blk_tasklet(struct scm_blk_dev *bdev) 329static void scm_blk_tasklet(struct scm_blk_dev *bdev)
284{ 330{
285 struct scm_request *scmrq; 331 struct scm_request *scmrq;
@@ -293,11 +339,8 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
293 spin_unlock_irqrestore(&bdev->lock, flags); 339 spin_unlock_irqrestore(&bdev->lock, flags);
294 340
295 if (scmrq->error && scmrq->retries-- > 0) { 341 if (scmrq->error && scmrq->retries-- > 0) {
296 if (scm_start_aob(scmrq->aob)) { 342 scm_blk_handle_error(scmrq);
297 spin_lock_irqsave(&bdev->rq_lock, flags); 343
298 scm_request_requeue(scmrq);
299 spin_unlock_irqrestore(&bdev->rq_lock, flags);
300 }
301 /* Request restarted or requeued, handle next. */ 344 /* Request restarted or requeued, handle next. */
302 spin_lock_irqsave(&bdev->lock, flags); 345 spin_lock_irqsave(&bdev->lock, flags);
303 continue; 346 continue;
@@ -310,7 +353,6 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
310 } 353 }
311 354
312 scm_request_finish(scmrq); 355 scm_request_finish(scmrq);
313 atomic_dec(&bdev->queued_reqs);
314 spin_lock_irqsave(&bdev->lock, flags); 356 spin_lock_irqsave(&bdev->lock, flags);
315 } 357 }
316 spin_unlock_irqrestore(&bdev->lock, flags); 358 spin_unlock_irqrestore(&bdev->lock, flags);
@@ -332,6 +374,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
332 } 374 }
333 375
334 bdev->scmdev = scmdev; 376 bdev->scmdev = scmdev;
377 bdev->state = SCM_OPER;
335 spin_lock_init(&bdev->rq_lock); 378 spin_lock_init(&bdev->rq_lock);
336 spin_lock_init(&bdev->lock); 379 spin_lock_init(&bdev->lock);
337 INIT_LIST_HEAD(&bdev->finished_requests); 380 INIT_LIST_HEAD(&bdev->finished_requests);
@@ -396,6 +439,18 @@ void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
396 put_disk(bdev->gendisk); 439 put_disk(bdev->gendisk);
397} 440}
398 441
442void scm_blk_set_available(struct scm_blk_dev *bdev)
443{
444 unsigned long flags;
445
446 spin_lock_irqsave(&bdev->lock, flags);
447 if (bdev->state == SCM_WR_PROHIBIT)
448 pr_info("%lu: Write access to the SCM increment is restored\n",
449 (unsigned long) bdev->scmdev->address);
450 bdev->state = SCM_OPER;
451 spin_unlock_irqrestore(&bdev->lock, flags);
452}
453
399static int __init scm_blk_init(void) 454static int __init scm_blk_init(void)
400{ 455{
401 int ret = -EINVAL; 456 int ret = -EINVAL;
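
Two intertwined fixes in the scm_blk hunks: a per-device state machine (SCM_OPER / SCM_WR_PROHIBIT, entered via the new EQC_WR_PROHIBIT handling and left via scm_blk_set_available()) that refuses writes while the increment is write-protected, and corrected queued_reqs accounting. As the hunks suggest, the cluster path previously never incremented the counter and the requeue path never decremented it, so the count drifted. A compact model of both invariants (all names are stand-ins for the driver's structures):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum bdev_state { SCM_OPER, SCM_WR_PROHIBIT };

struct bdev {
	atomic_int queued_reqs;
	enum bdev_state state;
};

/* Reads always pass; writes are refused while write-prohibited and the
 * queue is restarted later instead. */
static bool permit_request(struct bdev *b, bool is_write)
{
	return !is_write || b->state != SCM_WR_PROHIBIT;
}

/* The counter discipline: +1 whenever a request is started (normal or
 * cluster path), -1 exactly once per request in either terminal path. */
static void req_start(struct bdev *b)   { atomic_fetch_add(&b->queued_reqs, 1); }
static void req_finish(struct bdev *b)  { atomic_fetch_sub(&b->queued_reqs, 1); }
static void req_requeue(struct bdev *b) { atomic_fetch_sub(&b->queued_reqs, 1); }

int main(void)
{
	struct bdev b = { .state = SCM_OPER };

	if (permit_request(&b, true)) {
		req_start(&b);
		req_requeue(&b);	/* e.g. scm_start_aob() failed */
	}
	b.state = SCM_WR_PROHIBIT;
	printf("write ok? %d, in flight %d\n",
	       permit_request(&b, true), atomic_load(&b.queued_reqs));
	(void)req_finish;
	return 0;
}
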
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 3c1ccf494647..8b387b32fd62 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -21,6 +21,7 @@ struct scm_blk_dev {
21 spinlock_t rq_lock; /* guard the request queue */ 21 spinlock_t rq_lock; /* guard the request queue */
22 spinlock_t lock; /* guard the rest of the blockdev */ 22 spinlock_t lock; /* guard the rest of the blockdev */
23 atomic_t queued_reqs; 23 atomic_t queued_reqs;
24 enum {SCM_OPER, SCM_WR_PROHIBIT} state;
24 struct list_head finished_requests; 25 struct list_head finished_requests;
25#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE 26#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
26 struct list_head cluster_list; 27 struct list_head cluster_list;
@@ -48,6 +49,7 @@ struct scm_request {
48 49
49int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *); 50int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
50void scm_blk_dev_cleanup(struct scm_blk_dev *); 51void scm_blk_dev_cleanup(struct scm_blk_dev *);
52void scm_blk_set_available(struct scm_blk_dev *);
51void scm_blk_irq(struct scm_device *, void *, int); 53void scm_blk_irq(struct scm_device *, void *, int);
52 54
53void scm_request_finish(struct scm_request *); 55void scm_request_finish(struct scm_request *);
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c
index 9fa0a908607b..5f6180d6ff08 100644
--- a/drivers/s390/block/scm_drv.c
+++ b/drivers/s390/block/scm_drv.c
@@ -13,12 +13,23 @@
13#include <asm/eadm.h> 13#include <asm/eadm.h>
14#include "scm_blk.h" 14#include "scm_blk.h"
15 15
16static void notify(struct scm_device *scmdev) 16static void scm_notify(struct scm_device *scmdev, enum scm_event event)
17{ 17{
18 pr_info("%lu: The capabilities of the SCM increment changed\n", 18 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
19 (unsigned long) scmdev->address); 19
20 SCM_LOG(2, "State changed"); 20 switch (event) {
21 SCM_LOG_STATE(2, scmdev); 21 case SCM_CHANGE:
22 pr_info("%lu: The capabilities of the SCM increment changed\n",
23 (unsigned long) scmdev->address);
24 SCM_LOG(2, "State changed");
25 SCM_LOG_STATE(2, scmdev);
26 break;
27 case SCM_AVAIL:
28 SCM_LOG(2, "Increment available");
29 SCM_LOG_STATE(2, scmdev);
30 scm_blk_set_available(bdev);
31 break;
32 }
22} 33}
23 34
24static int scm_probe(struct scm_device *scmdev) 35static int scm_probe(struct scm_device *scmdev)
@@ -64,7 +75,7 @@ static struct scm_driver scm_drv = {
64 .name = "scm_block", 75 .name = "scm_block",
65 .owner = THIS_MODULE, 76 .owner = THIS_MODULE,
66 }, 77 },
67 .notify = notify, 78 .notify = scm_notify,
68 .probe = scm_probe, 79 .probe = scm_probe,
69 .remove = scm_remove, 80 .remove = scm_remove,
70 .handler = scm_blk_irq, 81 .handler = scm_blk_irq,
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 30a2255389e5..cd798386b622 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -627,6 +627,8 @@ static int __init sclp_detect_standby_memory(void)
627 struct read_storage_sccb *sccb; 627 struct read_storage_sccb *sccb;
628 int i, id, assigned, rc; 628 int i, id, assigned, rc;
629 629
630 if (OLDMEM_BASE) /* No standby memory in kdump mode */
631 return 0;
630 if (!early_read_info_sccb_valid) 632 if (!early_read_info_sccb_valid)
631 return 0; 633 return 0;
632 if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) 634 if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 31ceef1beb8b..e16c553f6556 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -433,6 +433,20 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
433 " failed (rc=%d).\n", ret); 433 " failed (rc=%d).\n", ret);
434} 434}
435 435
436static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
437{
438 int ret;
439
440 CIO_CRW_EVENT(4, "chsc: scm available information\n");
441 if (sei_area->rs != 7)
442 return;
443
444 ret = scm_process_availability_information();
445 if (ret)
446 CIO_CRW_EVENT(0, "chsc: process availability information"
447 " failed (rc=%d).\n", ret);
448}
449
436static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) 450static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
437{ 451{
438 switch (sei_area->cc) { 452 switch (sei_area->cc) {
@@ -468,6 +482,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
468 case 12: /* scm change notification */ 482 case 12: /* scm change notification */
469 chsc_process_sei_scm_change(sei_area); 483 chsc_process_sei_scm_change(sei_area);
470 break; 484 break;
485 case 14: /* scm available notification */
486 chsc_process_sei_scm_avail(sei_area);
487 break;
471 default: /* other stuff */ 488 default: /* other stuff */
472 CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", 489 CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
473 sei_area->cc); 490 sei_area->cc);
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 227e05f674b3..349d5fc47196 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -156,8 +156,10 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
156 156
157#ifdef CONFIG_SCM_BUS 157#ifdef CONFIG_SCM_BUS
158int scm_update_information(void); 158int scm_update_information(void);
159int scm_process_availability_information(void);
159#else /* CONFIG_SCM_BUS */ 160#else /* CONFIG_SCM_BUS */
160static inline int scm_update_information(void) { return 0; } 161static inline int scm_update_information(void) { return 0; }
162static inline int scm_process_availability_information(void) { return 0; }
161#endif /* CONFIG_SCM_BUS */ 163#endif /* CONFIG_SCM_BUS */
162 164
163 165
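
The header change follows the existing compile-out idiom: when CONFIG_SCM_BUS is off, a static inline stub returning 0 stands in for the real function, so callers such as chsc_process_sei_scm_avail() build without any #ifdef at the call site. The same shape, reduced to a standalone example (HAVE_FEATURE and feature_update are invented names):

#include <stdio.h>

/* #define HAVE_FEATURE 1 */

#ifdef HAVE_FEATURE
int feature_update(void);	/* real implementation lives elsewhere */
#else
static inline int feature_update(void) { return 0; }
#endif

int main(void)
{
	printf("update rc=%d\n", feature_update());	/* 0 when stubbed out */
	return 0;
}
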
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index bcf20f3aa51b..46ec25632e8b 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -211,7 +211,7 @@ static void scmdev_update(struct scm_device *scmdev, struct sale *sale)
211 goto out; 211 goto out;
212 scmdrv = to_scm_drv(scmdev->dev.driver); 212 scmdrv = to_scm_drv(scmdev->dev.driver);
213 if (changed && scmdrv->notify) 213 if (changed && scmdrv->notify)
214 scmdrv->notify(scmdev); 214 scmdrv->notify(scmdev, SCM_CHANGE);
215out: 215out:
216 device_unlock(&scmdev->dev); 216 device_unlock(&scmdev->dev);
217 if (changed) 217 if (changed)
@@ -297,6 +297,22 @@ int scm_update_information(void)
297 return ret; 297 return ret;
298} 298}
299 299
300static int scm_dev_avail(struct device *dev, void *unused)
301{
302 struct scm_driver *scmdrv = to_scm_drv(dev->driver);
303 struct scm_device *scmdev = to_scm_dev(dev);
304
305 if (dev->driver && scmdrv->notify)
306 scmdrv->notify(scmdev, SCM_AVAIL);
307
308 return 0;
309}
310
311int scm_process_availability_information(void)
312{
313 return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail);
314}
315
300static int __init scm_init(void) 316static int __init scm_init(void)
301{ 317{
302 int ret; 318 int ret;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d87961d4c0de..8c0622399fcd 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -916,6 +916,7 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
916 void *reply_param); 916 void *reply_param);
917int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); 917int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
918int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); 918int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
919int qeth_get_elements_for_frags(struct sk_buff *);
919int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, 920int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
920 struct sk_buff *, struct qeth_hdr *, int, int, int); 921 struct sk_buff *, struct qeth_hdr *, int, int, int);
921int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, 922int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 0d8cdff81813..0d73a999983d 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3679,6 +3679,25 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3679} 3679}
3680EXPORT_SYMBOL_GPL(qeth_get_priority_queue); 3680EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3681 3681
3682int qeth_get_elements_for_frags(struct sk_buff *skb)
3683{
3684 int cnt, length, e, elements = 0;
3685 struct skb_frag_struct *frag;
3686 char *data;
3687
3688 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3689 frag = &skb_shinfo(skb)->frags[cnt];
3690 data = (char *)page_to_phys(skb_frag_page(frag)) +
3691 frag->page_offset;
3692 length = frag->size;
3693 e = PFN_UP((unsigned long)data + length - 1) -
3694 PFN_DOWN((unsigned long)data);
3695 elements += e;
3696 }
3697 return elements;
3698}
3699EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
3700
3682int qeth_get_elements_no(struct qeth_card *card, void *hdr, 3701int qeth_get_elements_no(struct qeth_card *card, void *hdr,
3683 struct sk_buff *skb, int elems) 3702 struct sk_buff *skb, int elems)
3684{ 3703{
@@ -3686,7 +3705,8 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr,
3686 int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - 3705 int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) -
3687 PFN_DOWN((unsigned long)skb->data); 3706 PFN_DOWN((unsigned long)skb->data);
3688 3707
3689 elements_needed += skb_shinfo(skb)->nr_frags; 3708 elements_needed += qeth_get_elements_for_frags(skb);
3709
3690 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { 3710 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
3691 QETH_DBF_MESSAGE(2, "Invalid size of IP packet " 3711 QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
3692 "(Number=%d / Length=%d). Discarded.\n", 3712 "(Number=%d / Length=%d). Discarded.\n",
@@ -3771,12 +3791,23 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
3771 3791
3772 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3792 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3773 frag = &skb_shinfo(skb)->frags[cnt]; 3793 frag = &skb_shinfo(skb)->frags[cnt];
3774 buffer->element[element].addr = (char *) 3794 data = (char *)page_to_phys(skb_frag_page(frag)) +
3775 page_to_phys(skb_frag_page(frag)) 3795 frag->page_offset;
3776 + frag->page_offset; 3796 length = frag->size;
3777 buffer->element[element].length = frag->size; 3797 while (length > 0) {
3778 buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG; 3798 length_here = PAGE_SIZE -
3779 element++; 3799 ((unsigned long) data % PAGE_SIZE);
3800 if (length < length_here)
3801 length_here = length;
3802
3803 buffer->element[element].addr = data;
3804 buffer->element[element].length = length_here;
3805 buffer->element[element].eflags =
3806 SBAL_EFLAGS_MIDDLE_FRAG;
3807 length -= length_here;
3808 data += length_here;
3809 element++;
3810 }
3780 } 3811 }
3781 3812
3782 if (buffer->element[element - 1].eflags) 3813 if (buffer->element[element - 1].eflags)
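
The core of the qeth change: an skb fragment is not guaranteed to fit in one qdio buffer element, because an element cannot cross a page boundary. qeth_get_elements_for_frags() therefore counts pages per fragment instead of assuming one element each, and __qeth_fill_buffer() splits a straddling fragment into one element per page. The page arithmetic, lifted into a runnable sketch that mirrors the PFN_UP/PFN_DOWN computation above:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PFN_DOWN(x) ((x) / PAGE_SIZE)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

/* Pages spanned by [addr, addr+len), as in qeth_get_elements_for_frags() */
static unsigned long elements_for(unsigned long addr, unsigned long len)
{
	return PFN_UP(addr + len - 1) - PFN_DOWN(addr);
}

int main(void)
{
	printf("%lu\n", elements_for(100, 200));            /* 1: one page    */
	printf("%lu\n", elements_for(PAGE_SIZE - 50, 100)); /* 2: straddles   */
	return 0;
}
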
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 091ca0efa1c5..8710337dab3e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -623,7 +623,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
623 return rc; 623 return rc;
624} 624}
625 625
626static void qeth_l3_correct_routing_type(struct qeth_card *card, 626static int qeth_l3_correct_routing_type(struct qeth_card *card,
627 enum qeth_routing_types *type, enum qeth_prot_versions prot) 627 enum qeth_routing_types *type, enum qeth_prot_versions prot)
628{ 628{
629 if (card->info.type == QETH_CARD_TYPE_IQD) { 629 if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -632,7 +632,7 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card,
632 case PRIMARY_CONNECTOR: 632 case PRIMARY_CONNECTOR:
633 case SECONDARY_CONNECTOR: 633 case SECONDARY_CONNECTOR:
634 case MULTICAST_ROUTER: 634 case MULTICAST_ROUTER:
635 return; 635 return 0;
636 default: 636 default:
637 goto out_inval; 637 goto out_inval;
638 } 638 }
@@ -641,17 +641,18 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card,
641 case NO_ROUTER: 641 case NO_ROUTER:
642 case PRIMARY_ROUTER: 642 case PRIMARY_ROUTER:
643 case SECONDARY_ROUTER: 643 case SECONDARY_ROUTER:
644 return; 644 return 0;
645 case MULTICAST_ROUTER: 645 case MULTICAST_ROUTER:
646 if (qeth_is_ipafunc_supported(card, prot, 646 if (qeth_is_ipafunc_supported(card, prot,
647 IPA_OSA_MC_ROUTER)) 647 IPA_OSA_MC_ROUTER))
648 return; 648 return 0;
649 default: 649 default:
650 goto out_inval; 650 goto out_inval;
651 } 651 }
652 } 652 }
653out_inval: 653out_inval:
654 *type = NO_ROUTER; 654 *type = NO_ROUTER;
655 return -EINVAL;
655} 656}
656 657
657int qeth_l3_setrouting_v4(struct qeth_card *card) 658int qeth_l3_setrouting_v4(struct qeth_card *card)
@@ -660,8 +661,10 @@ int qeth_l3_setrouting_v4(struct qeth_card *card)
660 661
661 QETH_CARD_TEXT(card, 3, "setrtg4"); 662 QETH_CARD_TEXT(card, 3, "setrtg4");
662 663
663 qeth_l3_correct_routing_type(card, &card->options.route4.type, 664 rc = qeth_l3_correct_routing_type(card, &card->options.route4.type,
664 QETH_PROT_IPV4); 665 QETH_PROT_IPV4);
666 if (rc)
667 return rc;
665 668
666 rc = qeth_l3_send_setrouting(card, card->options.route4.type, 669 rc = qeth_l3_send_setrouting(card, card->options.route4.type,
667 QETH_PROT_IPV4); 670 QETH_PROT_IPV4);
@@ -683,8 +686,10 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
683 686
684 if (!qeth_is_supported(card, IPA_IPV6)) 687 if (!qeth_is_supported(card, IPA_IPV6))
685 return 0; 688 return 0;
686 qeth_l3_correct_routing_type(card, &card->options.route6.type, 689 rc = qeth_l3_correct_routing_type(card, &card->options.route6.type,
687 QETH_PROT_IPV6); 690 QETH_PROT_IPV6);
691 if (rc)
692 return rc;
688 693
689 rc = qeth_l3_send_setrouting(card, card->options.route6.type, 694 rc = qeth_l3_send_setrouting(card, card->options.route6.type,
690 QETH_PROT_IPV6); 695 QETH_PROT_IPV6);
@@ -2898,7 +2903,9 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb)
2898 tcp_hdr(skb)->doff * 4; 2903 tcp_hdr(skb)->doff * 4;
2899 int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); 2904 int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
2900 int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); 2905 int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd);
2901 elements += skb_shinfo(skb)->nr_frags; 2906
2907 elements += qeth_get_elements_for_frags(skb);
2908
2902 return elements; 2909 return elements;
2903} 2910}
2904 2911
@@ -3348,7 +3355,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3348 rc = -ENODEV; 3355 rc = -ENODEV;
3349 goto out_remove; 3356 goto out_remove;
3350 } 3357 }
3351 qeth_trace_features(card);
3352 3358
3353 if (!card->dev && qeth_l3_setup_netdev(card)) { 3359 if (!card->dev && qeth_l3_setup_netdev(card)) {
3354 rc = -ENODEV; 3360 rc = -ENODEV;
@@ -3425,6 +3431,7 @@ contin:
3425 qeth_l3_set_multicast_list(card->dev); 3431 qeth_l3_set_multicast_list(card->dev);
3426 rtnl_unlock(); 3432 rtnl_unlock();
3427 } 3433 }
3434 qeth_trace_features(card);
3428 /* let user_space know that device is online */ 3435 /* let user_space know that device is online */
3429 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 3436 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
3430 mutex_unlock(&card->conf_mutex); 3437 mutex_unlock(&card->conf_mutex);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index ebc379486267..e70af2406ff9 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -87,6 +87,8 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
87 rc = qeth_l3_setrouting_v6(card); 87 rc = qeth_l3_setrouting_v6(card);
88 } 88 }
89out: 89out:
90 if (rc)
91 route->type = old_route_type;
90 mutex_unlock(&card->conf_mutex); 92 mutex_unlock(&card->conf_mutex);
91 return rc ? rc : count; 93 return rc ? rc : count;
92} 94}
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 81a1fe661579..71a73ec5af8d 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -1483,7 +1483,7 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
1483 case TRIG_NONE: 1483 case TRIG_NONE:
1484 /* continuous acquisition */ 1484 /* continuous acquisition */
1485 devpriv->ai_continous = 1; 1485 devpriv->ai_continous = 1;
1486 devpriv->ai_sample_count = 0; 1486 devpriv->ai_sample_count = 1;
1487 break; 1487 break;
1488 } 1488 }
1489 1489
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
index 73582705e8c5..5c3714530961 100644
--- a/drivers/staging/zcache/Kconfig
+++ b/drivers/staging/zcache/Kconfig
@@ -15,7 +15,7 @@ config RAMSTER
15 depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE=y 15 depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE=y
16 depends on NET 16 depends on NET
17 # must ensure struct page is 8-byte aligned 17 # must ensure struct page is 8-byte aligned
18 select HAVE_ALIGNED_STRUCT_PAGE if !64_BIT 18 select HAVE_ALIGNED_STRUCT_PAGE if !64BIT
19 default n 19 default n
20 help 20 help
21 RAMster allows RAM on other machines in a cluster to be utilized 21 RAMster allows RAM on other machines in a cluster to be utilized
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index db0cf7c8adde..a0fc7b9eea65 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -166,6 +166,7 @@ static int chap_server_compute_md5(
166{ 166{
167 char *endptr; 167 char *endptr;
168 unsigned long id; 168 unsigned long id;
169 unsigned char id_as_uchar;
169 unsigned char digest[MD5_SIGNATURE_SIZE]; 170 unsigned char digest[MD5_SIGNATURE_SIZE];
170 unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2]; 171 unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
171 unsigned char identifier[10], *challenge = NULL; 172 unsigned char identifier[10], *challenge = NULL;
@@ -355,7 +356,9 @@ static int chap_server_compute_md5(
355 goto out; 356 goto out;
356 } 357 }
357 358
358 sg_init_one(&sg, &id, 1); 359 /* To handle both endiannesses */
360 id_as_uchar = id;
361 sg_init_one(&sg, &id_as_uchar, 1);
359 ret = crypto_hash_update(&desc, &sg, 1); 362 ret = crypto_hash_update(&desc, &sg, 1);
360 if (ret < 0) { 363 if (ret < 0) {
361 pr_err("crypto_hash_update() failed for id\n"); 364 pr_err("crypto_hash_update() failed for id\n");
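
The endianness trap fixed above: sg_init_one(&sg, &id, 1) hashed the first byte of an unsigned long's storage, which is the low byte on little-endian hosts but the (zero) high byte on big-endian ones such as s390, so CHAP digests differed by architecture. Narrowing through an explicit unsigned char makes the hashed byte the same everywhere. A self-contained illustration:

#include <stdio.h>

int main(void)
{
	unsigned long id = 0x2a;	/* CHAP identifier parsed from the login */

	/* What hashing the first byte of storage sees: 0x2a on
	 * little-endian, 0x00 on big-endian. */
	unsigned char first_byte = *(unsigned char *)&id;

	/* The fix narrows the value instead, identical on both. */
	unsigned char id_as_uchar = (unsigned char)id;

	printf("first byte %#x, narrowed %#x\n", first_byte, id_as_uchar);
	return 0;
}
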
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index bc02b018ae46..37ffc5bd2399 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -7,7 +7,7 @@
7#define FD_DEVICE_QUEUE_DEPTH 32 7#define FD_DEVICE_QUEUE_DEPTH 32
8#define FD_MAX_DEVICE_QUEUE_DEPTH 128 8#define FD_MAX_DEVICE_QUEUE_DEPTH 128
9#define FD_BLOCKSIZE 512 9#define FD_BLOCKSIZE 512
10#define FD_MAX_SECTORS 1024 10#define FD_MAX_SECTORS 2048
11 11
12#define RRF_EMULATE_CDB 0x01 12#define RRF_EMULATE_CDB 0x01
13#define RRF_GOT_LBA 0x02 13#define RRF_GOT_LBA 0x02
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 82e78d72fdb6..e992b27aa090 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -883,7 +883,14 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
883 pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i, 883 pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
884 page, len, off); 884 page, len, off);
885 885
886 while (len > 0 && data_len > 0) { 886 /*
887 * We only have one page of data in each sg element,
888 * so we cannot cross a page boundary.
889 */
890 if (off + len > PAGE_SIZE)
891 goto fail;
892
893 if (len > 0 && data_len > 0) {
887 bytes = min_t(unsigned int, len, PAGE_SIZE - off); 894 bytes = min_t(unsigned int, len, PAGE_SIZE - off);
888 bytes = min(bytes, data_len); 895 bytes = min(bytes, data_len);
889 896
@@ -940,9 +947,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
940 bio = NULL; 947 bio = NULL;
941 } 948 }
942 949
943 len -= bytes;
944 data_len -= bytes; 950 data_len -= bytes;
945 off = 0;
946 } 951 }
947 } 952 }
948 953
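
The pscsi change replaces the splitting loop with an up-front bounds check: since (as the new comment says) each sg element here holds at most one page of data, an element whose offset plus length exceeds PAGE_SIZE can never be satisfied and the mapping now fails instead of being quietly split. The predicate, reduced to a sketch:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* An sg entry backed by a single page must fit inside that page. */
static bool sg_entry_ok(unsigned int off, unsigned int len)
{
	return off + len <= PAGE_SIZE;
}

int main(void)
{
	printf("%d %d\n", sg_entry_ok(0, 4096), sg_entry_ok(4000, 200));
	return 0;	/* prints: 1 0 */
}
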
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 290230de2c53..60d4b5185f32 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -464,8 +464,11 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
464 break; 464 break;
465 case SYNCHRONIZE_CACHE: 465 case SYNCHRONIZE_CACHE:
466 case SYNCHRONIZE_CACHE_16: 466 case SYNCHRONIZE_CACHE_16:
467 if (!ops->execute_sync_cache) 467 if (!ops->execute_sync_cache) {
468 return TCM_UNSUPPORTED_SCSI_OPCODE; 468 size = 0;
469 cmd->execute_cmd = sbc_emulate_noop;
470 break;
471 }
469 472
470 /* 473 /*
471 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE 474 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
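
Rather than failing SYNCHRONIZE_CACHE with UNSUPPORTED OPCODE when a backend has no execute_sync_cache hook, the command now completes as a successful no-op with a zero-length data phase. The dispatch shape as a sketch (emulate_noop is a stand-in for the driver's sbc_emulate_noop):

#include <stdio.h>

typedef int (*exec_fn)(void *cmd);

static int emulate_noop(void *cmd) { (void)cmd; return 0; }

/* Backends that cannot flush a cache succeed as a no-op instead of
 * failing the opcode; callers always get a callable handler back. */
static exec_fn sync_cache_handler(exec_fn backend)
{
	return backend ? backend : emulate_noop;
}

int main(void)
{
	return sync_cache_handler(NULL)(NULL);	/* no-op path, returns 0 */
}
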
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 9169d6a5d7e4..aac9d2727e3c 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -711,7 +711,8 @@ int core_tpg_register(
711 711
712 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) { 712 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
713 if (core_tpg_setup_virtual_lun0(se_tpg) < 0) { 713 if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
714 kfree(se_tpg); 714 array_free(se_tpg->tpg_lun_list,
715 TRANSPORT_MAX_LUNS_PER_TPG);
715 return -ENOMEM; 716 return -ENOMEM;
716 } 717 }
717 } 718 }
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 2030b608136d..3243ea790eab 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1139,8 +1139,10 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
1139 return ret; 1139 return ret;
1140 1140
1141 ret = target_check_reservation(cmd); 1141 ret = target_check_reservation(cmd);
1142 if (ret) 1142 if (ret) {
1143 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1143 return ret; 1144 return ret;
1145 }
1144 1146
1145 ret = dev->transport->parse_cdb(cmd); 1147 ret = dev->transport->parse_cdb(cmd);
1146 if (ret) 1148 if (ret)
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
index 7b0bfa0e7a9c..3078c403b42d 100644
--- a/drivers/thermal/dove_thermal.c
+++ b/drivers/thermal/dove_thermal.c
@@ -143,22 +143,18 @@ static int dove_thermal_probe(struct platform_device *pdev)
143 if (!priv) 143 if (!priv)
144 return -ENOMEM; 144 return -ENOMEM;
145 145
146 priv->sensor = devm_request_and_ioremap(&pdev->dev, res); 146 priv->sensor = devm_ioremap_resource(&pdev->dev, res);
147 if (!priv->sensor) { 147 if (IS_ERR(priv->sensor))
148 dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); 148 return PTR_ERR(priv->sensor);
149 return -EADDRNOTAVAIL;
150 }
151 149
152 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 150 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
153 if (!res) { 151 if (!res) {
154 dev_err(&pdev->dev, "Failed to get platform resource\n"); 152 dev_err(&pdev->dev, "Failed to get platform resource\n");
155 return -ENODEV; 153 return -ENODEV;
156 } 154 }
157 priv->control = devm_request_and_ioremap(&pdev->dev, res); 155 priv->control = devm_ioremap_resource(&pdev->dev, res);
158 if (!priv->control) { 156 if (IS_ERR(priv->control))
159 dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); 157 return PTR_ERR(priv->control);
160 return -EADDRNOTAVAIL;
161 }
162 158
163 ret = dove_init_sensor(priv); 159 ret = dove_init_sensor(priv);
164 if (ret) { 160 if (ret) {
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c
index e04ebd8671ac..46568c078dee 100644
--- a/drivers/thermal/exynos_thermal.c
+++ b/drivers/thermal/exynos_thermal.c
@@ -476,7 +476,7 @@ static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
476 476
477 if (IS_ERR(th_zone->therm_dev)) { 477 if (IS_ERR(th_zone->therm_dev)) {
478 pr_err("Failed to register thermal zone device\n"); 478 pr_err("Failed to register thermal zone device\n");
479 ret = -EINVAL; 479 ret = PTR_ERR(th_zone->therm_dev);
480 goto err_unregister; 480 goto err_unregister;
481 } 481 }
482 th_zone->mode = THERMAL_DEVICE_ENABLED; 482 th_zone->mode = THERMAL_DEVICE_ENABLED;
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
index 65cb4f09e8f6..e5500edb5285 100644
--- a/drivers/thermal/kirkwood_thermal.c
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -85,11 +85,9 @@ static int kirkwood_thermal_probe(struct platform_device *pdev)
85 if (!priv) 85 if (!priv)
86 return -ENOMEM; 86 return -ENOMEM;
87 87
88 priv->sensor = devm_request_and_ioremap(&pdev->dev, res); 88 priv->sensor = devm_ioremap_resource(&pdev->dev, res);
89 if (!priv->sensor) { 89 if (IS_ERR(priv->sensor))
90 dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); 90 return PTR_ERR(priv->sensor);
91 return -EADDRNOTAVAIL;
92 }
93 91
94 thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0, 92 thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0,
95 priv, &ops, NULL, 0, 0); 93 priv, &ops, NULL, 0, 0);
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 28f091994013..2cc5b6115e3e 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -145,6 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
145 struct device *dev = rcar_priv_to_dev(priv); 145 struct device *dev = rcar_priv_to_dev(priv);
146 int i; 146 int i;
147 int ctemp, old, new; 147 int ctemp, old, new;
148 int ret = -EINVAL;
148 149
149 mutex_lock(&priv->lock); 150 mutex_lock(&priv->lock);
150 151
@@ -174,7 +175,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
174 175
175 if (!ctemp) { 176 if (!ctemp) {
176 dev_err(dev, "thermal sensor was broken\n"); 177 dev_err(dev, "thermal sensor was broken\n");
177 return -EINVAL; 178 goto err_out_unlock;
178 } 179 }
179 180
180 /* 181 /*
@@ -192,10 +193,10 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
192 dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp); 193 dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp);
193 194
194 priv->ctemp = ctemp; 195 priv->ctemp = ctemp;
195 196 ret = 0;
197err_out_unlock:
196 mutex_unlock(&priv->lock); 198 mutex_unlock(&priv->lock);
197 199 return ret;
198 return 0;
199} 200}
200 201
201static int rcar_thermal_get_temp(struct thermal_zone_device *zone, 202static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
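
The rework of rcar_thermal_update_temp() above fixes a lock leak: the "sensor was broken" path returned -EINVAL while still holding priv->lock. Routing every exit through one unlock label is the usual cure, and the later hunks apply the same idea to error-code propagation in probe. A runnable miniature, with a pthread mutex in place of the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int update_temp(int ctemp)
{
	int ret = -1;			/* assume failure, like ret = -EINVAL */

	pthread_mutex_lock(&lock);
	if (!ctemp) {
		fprintf(stderr, "thermal sensor was broken\n");
		goto err_out_unlock;	/* was: return -EINVAL with lock held */
	}
	ret = 0;
err_out_unlock:
	pthread_mutex_unlock(&lock);	/* single exit: the lock always drops */
	return ret;
}

int main(void)
{
	printf("%d %d\n", update_temp(25), update_temp(0));	/* 0 -1 */
	return 0;
}
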
@@ -363,6 +364,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
363 struct resource *res, *irq; 364 struct resource *res, *irq;
364 int mres = 0; 365 int mres = 0;
365 int i; 366 int i;
367 int ret = -ENODEV;
366 int idle = IDLE_INTERVAL; 368 int idle = IDLE_INTERVAL;
367 369
368 common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); 370 common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
@@ -399,11 +401,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
399 /* 401 /*
400 * rcar_has_irq_support() will be enabled 402 * rcar_has_irq_support() will be enabled
401 */ 403 */
402 common->base = devm_request_and_ioremap(dev, res); 404 common->base = devm_ioremap_resource(dev, res);
403 if (!common->base) { 405 if (IS_ERR(common->base))
404 dev_err(dev, "Unable to ioremap thermal register\n"); 406 return PTR_ERR(common->base);
405 return -ENOMEM;
406 }
407 407
408 /* enable temperature comparison */ 408 /* enable temperature comparison */
409 rcar_thermal_common_write(common, ENR, 0x00030303); 409 rcar_thermal_common_write(common, ENR, 0x00030303);
@@ -422,11 +422,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
422 return -ENOMEM; 422 return -ENOMEM;
423 } 423 }
424 424
425 priv->base = devm_request_and_ioremap(dev, res); 425 priv->base = devm_ioremap_resource(dev, res);
426 if (!priv->base) { 426 if (IS_ERR(priv->base))
427 dev_err(dev, "Unable to ioremap priv register\n"); 427 return PTR_ERR(priv->base);
428 return -ENOMEM;
429 }
430 428
431 priv->common = common; 429 priv->common = common;
432 priv->id = i; 430 priv->id = i;
@@ -441,6 +439,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
441 idle); 439 idle);
442 if (IS_ERR(priv->zone)) { 440 if (IS_ERR(priv->zone)) {
443 dev_err(dev, "can't register thermal zone\n"); 441 dev_err(dev, "can't register thermal zone\n");
442 ret = PTR_ERR(priv->zone);
444 goto error_unregister; 443 goto error_unregister;
445 } 444 }
446 445
@@ -460,7 +459,7 @@ error_unregister:
460 rcar_thermal_for_each_priv(priv, common) 459 rcar_thermal_for_each_priv(priv, common)
461 thermal_zone_device_unregister(priv->zone); 460 thermal_zone_device_unregister(priv->zone);
462 461
463 return -ENODEV; 462 return ret;
464} 463}
465 464
466static int rcar_thermal_remove(struct platform_device *pdev) 465static int rcar_thermal_remove(struct platform_device *pdev)
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250_core.c
index cf6a5383748a..35f9c96aada9 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -3418,6 +3418,7 @@ MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
3418#endif 3418#endif
3419MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR); 3419MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
3420 3420
3421#ifdef CONFIG_SERIAL_8250_DEPRECATED_OPTIONS
3421#ifndef MODULE 3422#ifndef MODULE
3422/* This module was renamed to 8250_core in 3.7. Keep the old "8250" name 3423/* This module was renamed to 8250_core in 3.7. Keep the old "8250" name
3423 * working as well for the module options so we don't break people. We 3424 * working as well for the module options so we don't break people. We
@@ -3432,7 +3433,7 @@ MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
3432static void __used s8250_options(void) 3433static void __used s8250_options(void)
3433{ 3434{
3434#undef MODULE_PARAM_PREFIX 3435#undef MODULE_PARAM_PREFIX
3435#define MODULE_PARAM_PREFIX "8250." 3436#define MODULE_PARAM_PREFIX "8250_core."
3436 3437
3437 module_param_cb(share_irqs, &param_ops_uint, &share_irqs, 0644); 3438 module_param_cb(share_irqs, &param_ops_uint, &share_irqs, 0644);
3438 module_param_cb(nr_uarts, &param_ops_uint, &nr_uarts, 0644); 3439 module_param_cb(nr_uarts, &param_ops_uint, &nr_uarts, 0644);
@@ -3444,5 +3445,6 @@ static void __used s8250_options(void)
3444#endif 3445#endif
3445} 3446}
3446#else 3447#else
3447MODULE_ALIAS("8250"); 3448MODULE_ALIAS("8250_core");
3449#endif
3448#endif 3450#endif
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index aa76825229dc..26e3a97ab157 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1554,6 +1554,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
1554#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001 1554#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
1555#define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d 1555#define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
1556#define PCI_VENDOR_ID_WCH 0x4348 1556#define PCI_VENDOR_ID_WCH 0x4348
1557#define PCI_DEVICE_ID_WCH_CH352_2S 0x3253
1557#define PCI_DEVICE_ID_WCH_CH353_4S 0x3453 1558#define PCI_DEVICE_ID_WCH_CH353_4S 0x3453
1558#define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046 1559#define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046
1559#define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053 1560#define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053
@@ -2172,6 +2173,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
2172 .subdevice = PCI_ANY_ID, 2173 .subdevice = PCI_ANY_ID,
2173 .setup = pci_wch_ch353_setup, 2174 .setup = pci_wch_ch353_setup,
2174 }, 2175 },
2176 /* WCH CH352 2S card (16550 clone) */
2177 {
2178 .vendor = PCI_VENDOR_ID_WCH,
2179 .device = PCI_DEVICE_ID_WCH_CH352_2S,
2180 .subvendor = PCI_ANY_ID,
2181 .subdevice = PCI_ANY_ID,
2182 .setup = pci_wch_ch353_setup,
2183 },
2175 /* 2184 /*
2176 * ASIX devices with FIFO bug 2185 * ASIX devices with FIFO bug
2177 */ 2186 */
@@ -4870,6 +4879,10 @@ static struct pci_device_id serial_pci_tbl[] = {
4870 PCI_ANY_ID, PCI_ANY_ID, 4879 PCI_ANY_ID, PCI_ANY_ID,
4871 0, 0, pbn_b0_bt_2_115200 }, 4880 0, 0, pbn_b0_bt_2_115200 },
4872 4881
4882 { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH352_2S,
4883 PCI_ANY_ID, PCI_ANY_ID,
4884 0, 0, pbn_b0_bt_2_115200 },
4885
4873 /* 4886 /*
4874 * Commtech, Inc. Fastcom adapters 4887 * Commtech, Inc. Fastcom adapters
4875 */ 4888 */
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 2ef9537bcb2c..80fe91e64a52 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -33,6 +33,23 @@ config SERIAL_8250
33 Most people will say Y or M here, so that they can use serial mice, 33 Most people will say Y or M here, so that they can use serial mice,
34 modems and similar devices connecting to the standard serial ports. 34 modems and similar devices connecting to the standard serial ports.
35 35
36config SERIAL_8250_DEPRECATED_OPTIONS
37 bool "Support 8250_core.* kernel options (DEPRECATED)"
38 depends on SERIAL_8250
39 default y
40 ---help---
41 In 3.7 we renamed 8250 to 8250_core by mistake, so now we have to
42 accept kernel parameters in both forms like 8250_core.nr_uarts=4 and
43	  8250.nr_uarts=4. We have now renamed the module back to 8250, but if
44	  anybody noticed in 3.7 and changed their userspace we still have to
45	  keep the 8250_core.* options around until they revert the changes
46	  they made.
47
48	  If 8250 is built as a module, this adds an 8250_core alias instead.
49
50 If you did not notice yet and/or you have userspace from pre-3.7, it
51 is safe (and recommended) to say N here.
52
36config SERIAL_8250_PNP 53config SERIAL_8250_PNP
37 bool "8250/16550 PNP device support" if EXPERT 54 bool "8250/16550 PNP device support" if EXPERT
38 depends on SERIAL_8250 && PNP 55 depends on SERIAL_8250 && PNP
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index a23838a4d535..36d68d054307 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -2,10 +2,10 @@
2# Makefile for the 8250 serial device drivers. 2# Makefile for the 8250 serial device drivers.
3# 3#
4 4
5obj-$(CONFIG_SERIAL_8250) += 8250_core.o 5obj-$(CONFIG_SERIAL_8250) += 8250.o
68250_core-y := 8250.o 68250-y := 8250_core.o
78250_core-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o 78250-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
88250_core-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o 88250-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o
9obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o 9obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
10obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o 10obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
11obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o 11obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index d4a7c241b751..3467462869ce 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -158,7 +158,7 @@ struct atmel_uart_port {
158}; 158};
159 159
160static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART]; 160static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
161static unsigned long atmel_ports_in_use; 161static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
162 162
163#ifdef SUPPORT_SYSRQ 163#ifdef SUPPORT_SYSRQ
164static struct console atmel_console; 164static struct console atmel_console;
@@ -1769,15 +1769,14 @@ static int atmel_serial_probe(struct platform_device *pdev)
1769 if (ret < 0) 1769 if (ret < 0)
1770 /* port id not found in platform data nor device-tree aliases: 1770 /* port id not found in platform data nor device-tree aliases:
1771 * auto-enumerate it */ 1771 * auto-enumerate it */
1772 ret = find_first_zero_bit(&atmel_ports_in_use, 1772 ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
1773 sizeof(atmel_ports_in_use));
1774 1773
1775 if (ret > ATMEL_MAX_UART) { 1774 if (ret >= ATMEL_MAX_UART) {
1776 ret = -ENODEV; 1775 ret = -ENODEV;
1777 goto err; 1776 goto err;
1778 } 1777 }
1779 1778
1780 if (test_and_set_bit(ret, &atmel_ports_in_use)) { 1779 if (test_and_set_bit(ret, atmel_ports_in_use)) {
1781 /* port already in use */ 1780 /* port already in use */
1782 ret = -EBUSY; 1781 ret = -EBUSY;
1783 goto err; 1782 goto err;
@@ -1857,7 +1856,7 @@ static int atmel_serial_remove(struct platform_device *pdev)
1857 1856
1858 /* "port" is allocated statically, so we shouldn't free it */ 1857 /* "port" is allocated statically, so we shouldn't free it */
1859 1858
1860 clear_bit(port->line, &atmel_ports_in_use); 1859 clear_bit(port->line, atmel_ports_in_use);
1861 1860
1862 clk_put(atmel_port->clk); 1861 clk_put(atmel_port->clk);
1863 1862
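
Two bugs hide in the old atmel_serial auto-enumeration: find_first_zero_bit() was given sizeof(atmel_ports_in_use), a byte count of 4 or 8, where it expects a size in bits, and the exhaustion test used > where >= is needed. DECLARE_BITMAP sizes the map in ATMEL_MAX_UART bits and the comparison is fixed alongside. The corrected logic restated as a standalone sketch, with a plain array in place of the kernel bitmap API:

#include <stdio.h>

#define ATMEL_MAX_UART 7

static unsigned char port_in_use[ATMEL_MAX_UART];	/* one flag per port */

static int claim_port(void)
{
	int i;

	/* find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART): the
	 * size argument counts BITS; passing sizeof() searched only 4 or
	 * 8 slots regardless of ATMEL_MAX_UART. */
	for (i = 0; i < ATMEL_MAX_UART; i++)
		if (!port_in_use[i])
			break;

	if (i >= ATMEL_MAX_UART)	/* was >: index MAX itself is invalid */
		return -1;		/* -ENODEV */

	port_in_use[i] = 1;		/* test_and_set_bit() */
	return i;
}

int main(void)
{
	for (int n = 0; n <= ATMEL_MAX_UART; n++)
		printf("claimed port %d\n", claim_port());	/* last prints -1 */
	return 0;
}
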
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index e343d6670854..451687cb9685 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -968,6 +968,7 @@ static struct uart_ops sunsu_pops = {
 #define UART_NR			4
 
 static struct uart_sunsu_port sunsu_ports[UART_NR];
+static int nr_inst; /* Number of already registered ports */
 
 #ifdef CONFIG_SERIO
 
@@ -1337,13 +1338,8 @@ static int __init sunsu_console_setup(struct console *co, char *options)
 	printk("Console: ttyS%d (SU)\n",
 	       (sunsu_reg.minor - 64) + co->index);
 
-	/*
-	 * Check whether an invalid uart number has been specified, and
-	 * if so, search for the first available port that does have
-	 * console support.
-	 */
-	if (co->index >= UART_NR)
-		co->index = 0;
+	if (co->index > nr_inst)
+		return -ENODEV;
 	port = &sunsu_ports[co->index].port;
 
 	/*
@@ -1408,7 +1404,6 @@ static enum su_type su_get_type(struct device_node *dp)
 
 static int su_probe(struct platform_device *op)
 {
-	static int inst;
 	struct device_node *dp = op->dev.of_node;
 	struct uart_sunsu_port *up;
 	struct resource *rp;
@@ -1418,16 +1413,16 @@ static int su_probe(struct platform_device *op)
 
 	type = su_get_type(dp);
 	if (type == SU_PORT_PORT) {
-		if (inst >= UART_NR)
+		if (nr_inst >= UART_NR)
 			return -EINVAL;
-		up = &sunsu_ports[inst];
+		up = &sunsu_ports[nr_inst];
 	} else {
 		up = kzalloc(sizeof(*up), GFP_KERNEL);
 		if (!up)
 			return -ENOMEM;
 	}
 
-	up->port.line = inst;
+	up->port.line = nr_inst;
 
 	spin_lock_init(&up->port.lock);
 
@@ -1461,6 +1456,8 @@ static int su_probe(struct platform_device *op)
 	}
 	dev_set_drvdata(&op->dev, up);
 
+	nr_inst++;
+
 	return 0;
 }
 
@@ -1488,7 +1485,7 @@ static int su_probe(struct platform_device *op)
 
 	dev_set_drvdata(&op->dev, up);
 
-	inst++;
+	nr_inst++;
 
 	return 0;
 
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index ba451c7209fc..f36bbba1ac8b 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -578,6 +578,8 @@ static int xuartps_startup(struct uart_port *port)
 	/* Receive Timeout register is enabled with value of 10 */
 	xuartps_writel(10, XUARTPS_RXTOUT_OFFSET);
 
+	/* Clear out any pending interrupts before enabling them */
+	xuartps_writel(xuartps_readl(XUARTPS_ISR_OFFSET), XUARTPS_ISR_OFFSET);
 
 	/* Set the Interrupt Registers with desired interrupts */
 	xuartps_writel(XUARTPS_IXR_TXEMPTY | XUARTPS_IXR_PARITY |
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index e4ca345873c3..d7799deacb21 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -93,7 +93,7 @@ vcs_poll_data_free(struct vcs_poll_data *poll)
 static struct vcs_poll_data *
 vcs_poll_data_get(struct file *file)
 {
-	struct vcs_poll_data *poll = file->private_data;
+	struct vcs_poll_data *poll = file->private_data, *kill = NULL;
 
 	if (poll)
 		return poll;
@@ -122,10 +122,12 @@ vcs_poll_data_get(struct file *file)
 		file->private_data = poll;
 	} else {
 		/* someone else raced ahead of us */
-		vcs_poll_data_free(poll);
+		kill = poll;
 		poll = file->private_data;
 	}
 	spin_unlock(&file->f_lock);
+	if (kill)
+		vcs_poll_data_free(kill);
 
 	return poll;
 }
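The vc_screen change above defers vcs_poll_data_free() until after f_lock is dropped, so the loser of the allocation race is never freed while a spinlock is held. A minimal userspace sketch of the same deferred-free pattern, with a pthread mutex standing in for the spinlock (illustrative names, not the kernel API):

/* Userspace sketch of the "remember the loser, free after unlock" pattern. */
#include <pthread.h>
#include <stdlib.h>

struct poll_data { int dummy; };

static struct poll_data *installed;
static pthread_mutex_t f_lock = PTHREAD_MUTEX_INITIALIZER;

static struct poll_data *install_or_reuse(struct poll_data *mine)
{
	struct poll_data *kill = NULL, *ret;

	pthread_mutex_lock(&f_lock);
	if (!installed)
		installed = mine;	/* we won the race */
	else
		kill = mine;		/* someone raced ahead; defer the free */
	ret = installed;
	pthread_mutex_unlock(&f_lock);

	free(kill);			/* safe here: lock no longer held */
	return ret;
}

int main(void)
{
	struct poll_data *a = malloc(sizeof(*a));
	struct poll_data *b = malloc(sizeof(*b));

	install_or_reuse(a);
	install_or_reuse(b);	/* b loses and is freed outside the lock */
	free(installed);
	return 0;
}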
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 8ac25adf31b4..387dc6c8ad25 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -593,7 +593,6 @@ static void acm_port_destruct(struct tty_port *port)
 
 	dev_dbg(&acm->control->dev, "%s\n", __func__);
 
-	tty_unregister_device(acm_tty_driver, acm->minor);
 	acm_release_minor(acm);
 	usb_put_intf(acm->control);
 	kfree(acm->country_codes);
@@ -977,6 +976,8 @@ static int acm_probe(struct usb_interface *intf,
 	int num_rx_buf;
 	int i;
 	int combined_interfaces = 0;
+	struct device *tty_dev;
+	int rv = -ENOMEM;
 
 	/* normal quirks */
 	quirks = (unsigned long)id->driver_info;
@@ -1339,11 +1340,24 @@ skip_countries:
 	usb_set_intfdata(data_interface, acm);
 
 	usb_get_intf(control_interface);
-	tty_port_register_device(&acm->port, acm_tty_driver, minor,
+	tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
 			&control_interface->dev);
+	if (IS_ERR(tty_dev)) {
+		rv = PTR_ERR(tty_dev);
+		goto alloc_fail8;
+	}
 
 	return 0;
+alloc_fail8:
+	if (acm->country_codes) {
+		device_remove_file(&acm->control->dev,
+				&dev_attr_wCountryCodes);
+		device_remove_file(&acm->control->dev,
+				&dev_attr_iCountryCodeRelDate);
+	}
+	device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
 alloc_fail7:
+	usb_set_intfdata(intf, NULL);
 	for (i = 0; i < ACM_NW; i++)
 		usb_free_urb(acm->wb[i].urb);
 alloc_fail6:
@@ -1359,7 +1373,7 @@ alloc_fail2:
 	acm_release_minor(acm);
 	kfree(acm);
 alloc_fail:
-	return -ENOMEM;
+	return rv;
 }
 
 static void stop_data_traffic(struct acm *acm)
@@ -1411,6 +1425,8 @@ static void acm_disconnect(struct usb_interface *intf)
 
 	stop_data_traffic(acm);
 
+	tty_unregister_device(acm_tty_driver, acm->minor);
+
 	usb_free_urb(acm->ctrlurb);
 	for (i = 0; i < ACM_NW; i++)
 		usb_free_urb(acm->wb[i].urb);
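The acm_probe() change above extends the driver's goto-unwind ladder with a new alloc_fail8 label, so a failed tty_port_register_device() tears down the sysfs files registered just before it. A compact sketch of the idiom itself, with illustrative allocations a/b/c in place of the driver's resources:

/* Sketch of the goto-unwind idiom: each later failure jumps to a label
 * that releases everything set up so far, in reverse order. */
#include <stdlib.h>

struct dev { void *a, *b, *c; };

static int probe(struct dev *d)
{
	int rv = -1;			/* like the default rv = -ENOMEM */

	d->a = malloc(16);
	if (!d->a)
		goto fail_a;
	d->b = malloc(16);
	if (!d->b)
		goto fail_b;
	d->c = malloc(16);
	if (!d->c)
		goto fail_c;
	return 0;

fail_c:					/* undo what succeeded before c failed */
	free(d->b);
fail_b:
	free(d->a);
fail_a:
	return rv;
}

int main(void)
{
	struct dev d;
	return probe(&d);
}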
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 622b4a48e732..2b487d4797bd 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -173,6 +173,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	struct hc_driver	*driver;
 	struct usb_hcd		*hcd;
 	int			retval;
+	int			hcd_irq = 0;
 
 	if (usb_disabled())
 		return -ENODEV;
@@ -187,15 +188,19 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		return -ENODEV;
 	dev->current_state = PCI_D0;
 
-	/* The xHCI driver supports MSI and MSI-X,
-	 * so don't fail if the BIOS doesn't provide a legacy IRQ.
+	/*
+	 * The xHCI driver has its own irq management
+	 * make sure irq setup is not touched for xhci in generic hcd code
 	 */
-	if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) {
-		dev_err(&dev->dev,
-			"Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
-			pci_name(dev));
-		retval = -ENODEV;
-		goto disable_pci;
+	if ((driver->flags & HCD_MASK) != HCD_USB3) {
+		if (!dev->irq) {
+			dev_err(&dev->dev,
+			"Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
+				pci_name(dev));
+			retval = -ENODEV;
+			goto disable_pci;
+		}
+		hcd_irq = dev->irq;
 	}
 
 	hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev));
@@ -245,7 +250,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
 	pci_set_master(dev);
 
-	retval = usb_add_hcd(hcd, dev->irq, IRQF_SHARED);
+	retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED);
 	if (retval != 0)
 		goto unmap_registers;
 	set_hs_companion(dev, hcd);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 99b34a30354f..f9ec44cbb82f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2412,6 +2412,14 @@ int usb_hcd_is_primary_hcd(struct usb_hcd *hcd)
 }
 EXPORT_SYMBOL_GPL(usb_hcd_is_primary_hcd);
 
+int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1)
+{
+	if (!hcd->driver->find_raw_port_number)
+		return port1;
+
+	return hcd->driver->find_raw_port_number(hcd, port1);
+}
+
 static int usb_hcd_request_irqs(struct usb_hcd *hcd,
 		unsigned int irqnum, unsigned long irqflags)
 {
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index b6f4bad3f756..255c14464bf2 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/acpi.h>
 #include <linux/pci.h>
+#include <linux/usb/hcd.h>
 #include <acpi/acpi_bus.h>
 
 #include "usb.h"
@@ -188,8 +189,13 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
 	 * connected to.
 	 */
 	if (!udev->parent) {
-		*handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev),
+		struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+		int raw_port_num;
+
+		raw_port_num = usb_hcd_find_raw_port_number(hcd,
 			port_num);
+		*handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev),
+			raw_port_num);
 		if (!*handle)
 			return -ENODEV;
 	} else {
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 5a0c541daf89..c7525b1cad74 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -145,6 +145,7 @@ config USB_LPC32XX
 	tristate "LPC32XX USB Peripheral Controller"
 	depends on ARCH_LPC32XX
 	select USB_ISP1301
+	select USB_OTG_UTILS
 	help
 	   This option selects the USB device controller in the LPC32xx SoC.
 
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 71beeb833558..cc9c49c57c80 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -447,14 +447,13 @@ static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req)
 static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
 {
 	struct f_rndis		*rndis = req->context;
-	struct usb_composite_dev *cdev = rndis->port.func.config->cdev;
 	int			status;
 
 	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
 //	spin_lock(&dev->lock);
 	status = rndis_msg_parser(rndis->config, (u8 *) req->buf);
 	if (status < 0)
-		ERROR(cdev, "RNDIS command error %d, %d/%d\n",
+		pr_err("RNDIS command error %d, %d/%d\n",
 			status, req->actual, req->length);
 //	spin_unlock(&dev->lock);
 }
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
index 3953dd4d7186..3b343b23e4b0 100644
--- a/drivers/usb/gadget/g_ffs.c
+++ b/drivers/usb/gadget/g_ffs.c
@@ -357,7 +357,7 @@ static int gfs_bind(struct usb_composite_dev *cdev)
 		goto error;
 	gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id;
 
-	for (i = func_num; --i; ) {
+	for (i = func_num; i--; ) {
 		ret = functionfs_bind(ffs_tab[i].ffs_data, cdev);
 		if (unlikely(ret < 0)) {
 			while (++i < func_num)
@@ -413,7 +413,7 @@ static int gfs_unbind(struct usb_composite_dev *cdev)
 	gether_cleanup();
 	gfs_ether_setup = false;
 
-	for (i = func_num; --i; )
+	for (i = func_num; i--; )
 		if (ffs_tab[i].ffs_data)
 			functionfs_unbind(ffs_tab[i].ffs_data);
 
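The two g_ffs loop fixes above are the classic countdown off-by-one: with --i as the condition the body never runs for index 0 (and is skipped entirely when func_num is 1), while i-- visits every index down to 0. A self-contained demonstration:

/* Demonstrates the bound fix: "--i" never visits index 0, "i--" does. */
#include <stdio.h>

int main(void)
{
	int i, func_num = 3;

	for (i = func_num; --i; )
		printf("pre-decrement visits %d\n", i);		/* 2 1 */
	for (i = func_num; i--; )
		printf("post-decrement visits %d\n", i);	/* 2 1 0 */
	return 0;
}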
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
index d226058e3b88..32524b631959 100644
--- a/drivers/usb/gadget/net2272.c
+++ b/drivers/usb/gadget/net2272.c
@@ -59,7 +59,7 @@ static const char * const ep_name[] = {
 };
 
 #define DMA_ADDR_INVALID	(~(dma_addr_t)0)
-#ifdef CONFIG_USB_GADGET_NET2272_DMA
+#ifdef CONFIG_USB_NET2272_DMA
 /*
  * use_dma: the NET2272 can use an external DMA controller.
  * Note that since there is no generic DMA api, some functions,
@@ -1495,6 +1495,13 @@ stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
 	for (i = 0; i < 4; ++i)
 		net2272_dequeue_all(&dev->ep[i]);
 
+	/* report disconnect; the driver is already quiesced */
+	if (driver) {
+		spin_unlock(&dev->lock);
+		driver->disconnect(&dev->gadget);
+		spin_lock(&dev->lock);
+	}
+
 	net2272_usb_reinit(dev);
 }
 
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index a1b650e11339..3bd0f992fb49 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -1924,7 +1924,6 @@ static int net2280_start(struct usb_gadget *_gadget,
 err_func:
 	device_remove_file (&dev->pdev->dev, &dev_attr_function);
 err_unbind:
-	driver->unbind (&dev->gadget);
 	dev->gadget.dev.driver = NULL;
 	dev->driver = NULL;
 	return retval;
@@ -1946,6 +1945,13 @@ stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
 	for (i = 0; i < 7; i++)
 		nuke (&dev->ep [i]);
 
+	/* report disconnect; the driver is already quiesced */
+	if (driver) {
+		spin_unlock(&dev->lock);
+		driver->disconnect(&dev->gadget);
+		spin_lock(&dev->lock);
+	}
+
 	usb_reinit (dev);
 }
 
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index c5034d9c946b..b369292d4b90 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -136,7 +136,7 @@ static struct portmaster {
 	pr_debug(fmt, ##arg)
 #endif /* pr_vdebug */
 #else
-#ifndef pr_vdebig
+#ifndef pr_vdebug
 #define pr_vdebug(fmt, arg...) \
 	({ if (0) pr_debug(fmt, ##arg); })
 #endif /* pr_vdebug */
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 2a9cd369f71c..f8f62c3ed65e 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -216,7 +216,7 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
 	usb_gadget_disconnect(udc->gadget);
 	udc->driver->disconnect(udc->gadget);
 	udc->driver->unbind(udc->gadget);
-	usb_gadget_udc_stop(udc->gadget, udc->driver);
+	usb_gadget_udc_stop(udc->gadget, NULL);
 
 	udc->driver = NULL;
 	udc->dev.driver = NULL;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 5726cb144abf..416a6dce5e11 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -302,6 +302,7 @@ static void ehci_quiesce (struct ehci_hcd *ehci)
 
 static void end_unlink_async(struct ehci_hcd *ehci);
 static void unlink_empty_async(struct ehci_hcd *ehci);
+static void unlink_empty_async_suspended(struct ehci_hcd *ehci);
 static void ehci_work(struct ehci_hcd *ehci);
 static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
 static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 4d3b294f203e..7d06e77f6c4f 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -328,7 +328,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
 	ehci->rh_state = EHCI_RH_SUSPENDED;
 
 	end_unlink_async(ehci);
-	unlink_empty_async(ehci);
+	unlink_empty_async_suspended(ehci);
 	ehci_handle_intr_unlinks(ehci);
 	end_free_itds(ehci);
 
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 5464665f0b6a..23d136904285 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1316,6 +1316,19 @@ static void unlink_empty_async(struct ehci_hcd *ehci)
 	}
 }
 
+/* The root hub is suspended; unlink all the async QHs */
+static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
+{
+	struct ehci_qh		*qh;
+
+	while (ehci->async->qh_next.qh) {
+		qh = ehci->async->qh_next.qh;
+		WARN_ON(!list_empty(&qh->qtd_list));
+		single_unlink_async(ehci, qh);
+	}
+	start_iaa_cycle(ehci, false);
+}
+
 /* makes sure the async qh will become idle */
 /* caller must own ehci->lock */
 
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index b476daf49f6f..010f686d8881 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1214,6 +1214,7 @@ itd_urb_transaction (
 
 		memset (itd, 0, sizeof *itd);
 		itd->itd_dma = itd_dma;
+		itd->frame = 9999;		/* an invalid value */
 		list_add (&itd->itd_list, &sched->td_list);
 	}
 	spin_unlock_irqrestore (&ehci->lock, flags);
@@ -1915,6 +1916,7 @@ sitd_urb_transaction (
 
 		memset (sitd, 0, sizeof *sitd);
 		sitd->sitd_dma = sitd_dma;
+		sitd->frame = 9999;		/* an invalid value */
 		list_add (&sitd->sitd_list, &iso_sched->td_list);
 	}
 
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 20dbdcbe9b0f..c3fa1305f830 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -304,7 +304,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
 	 * (a) SMP races against real IAA firing and retriggering, and
 	 * (b) clean HC shutdown, when IAA watchdog was pending.
 	 */
-	if (ehci->async_iaa) {
+	if (1) {
 		u32 cmd, status;
 
 		/* If we get here, IAA is *REALLY* late.  It's barely
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 35616ffbe3ae..6dc238c592bc 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1022,44 +1022,24 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
  * is attached to (or the roothub port its ancestor hub is attached to).  All we
  * know is the index of that port under either the USB 2.0 or the USB 3.0
  * roothub, but that doesn't give us the real index into the HW port status
- * registers. Scan through the xHCI roothub port array, looking for the Nth
- * entry of the correct port speed. Return the port number of that entry.
+ * registers. Call xhci_find_raw_port_number() to get real index.
  */
 static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
 		struct usb_device *udev)
 {
 	struct usb_device *top_dev;
-	unsigned int num_similar_speed_ports;
-	unsigned int faked_port_num;
-	int i;
+	struct usb_hcd *hcd;
+
+	if (udev->speed == USB_SPEED_SUPER)
+		hcd = xhci->shared_hcd;
+	else
+		hcd = xhci->main_hcd;
 
 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
 			top_dev = top_dev->parent)
 		/* Found device below root hub */;
-	faked_port_num = top_dev->portnum;
-	for (i = 0, num_similar_speed_ports = 0;
-			i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
-		u8 port_speed = xhci->port_array[i];
-
-		/*
-		 * Skip ports that don't have known speeds, or have duplicate
-		 * Extended Capabilities port speed entries.
-		 */
-		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
-			continue;
 
-		/*
-		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
-		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
-		 * matches the device speed, it's a similar speed port.
-		 */
-		if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER))
-			num_similar_speed_ports++;
-		if (num_similar_speed_ports == faked_port_num)
-			/* Roothub ports are numbered from 1 to N */
-			return i+1;
-	}
-	return 0;
+	return xhci_find_raw_port_number(hcd, top_dev->portnum);
 }
 
 /* Setup an xHCI virtual device for a Set Address command */
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index af259e0ec172..1a30c380043c 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -313,6 +313,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
 	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
 	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
 	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
+	.find_raw_port_number =	xhci_find_raw_port_number,
 };
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 882875465301..1969c001b3f9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1599,14 +1599,20 @@ static void handle_port_status(struct xhci_hcd *xhci,
 	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
 	if ((port_id <= 0) || (port_id > max_ports)) {
 		xhci_warn(xhci, "Invalid port id %d\n", port_id);
-		bogus_port_status = true;
-		goto cleanup;
+		inc_deq(xhci, xhci->event_ring);
+		return;
 	}
 
 	/* Figure out which usb_hcd this port is attached to:
 	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
 	 */
 	major_revision = xhci->port_array[port_id - 1];
+
+	/* Find the right roothub. */
+	hcd = xhci_to_hcd(xhci);
+	if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
+		hcd = xhci->shared_hcd;
+
 	if (major_revision == 0) {
 		xhci_warn(xhci, "Event for port %u not in "
 				"Extended Capabilities, ignoring.\n",
@@ -1629,10 +1635,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
 	 * into the index into the ports on the correct split roothub, and the
 	 * correct bus_state structure.
 	 */
-	/* Find the right roothub. */
-	hcd = xhci_to_hcd(xhci);
-	if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
-		hcd = xhci->shared_hcd;
 	bus_state = &xhci->bus_state[hcd_index(hcd)];
 	if (hcd->speed == HCD_USB3)
 		port_array = xhci->usb3_ports;
@@ -2027,8 +2029,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		if (event_trb != ep_ring->dequeue &&
 				event_trb != td->last_trb)
 			td->urb->actual_length =
-				td->urb->transfer_buffer_length
-				- TRB_LEN(le32_to_cpu(event->transfer_len));
+				td->urb->transfer_buffer_length -
+				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
 		else
 			td->urb->actual_length = 0;
 
@@ -2060,7 +2062,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 			/* Maybe the event was for the data stage? */
 			td->urb->actual_length =
 				td->urb->transfer_buffer_length -
-				TRB_LEN(le32_to_cpu(event->transfer_len));
+				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
 			xhci_dbg(xhci, "Waiting for status "
 					"stage event\n");
 			return 0;
@@ -2096,7 +2098,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	/* handle completion code */
 	switch (trb_comp_code) {
 	case COMP_SUCCESS:
-		if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
+		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
 			frame->status = 0;
 			break;
 		}
@@ -2141,7 +2143,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 			len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
 		}
 		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
-			TRB_LEN(le32_to_cpu(event->transfer_len));
+			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
 
 		if (trb_comp_code != COMP_STOP_INVAL) {
 			frame->actual_length = len;
@@ -2199,7 +2201,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	case COMP_SUCCESS:
 		/* Double check that the HW transferred everything. */
 		if (event_trb != td->last_trb ||
-				TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
 			xhci_warn(xhci, "WARN Successful completion "
 					"on short TX\n");
 			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
@@ -2227,18 +2229,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 				"%d bytes untransferred\n",
 				td->urb->ep->desc.bEndpointAddress,
 				td->urb->transfer_buffer_length,
-				TRB_LEN(le32_to_cpu(event->transfer_len)));
+				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
 	/* Fast path - was this the last TRB in the TD for this URB? */
 	if (event_trb == td->last_trb) {
-		if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
 			td->urb->actual_length =
 				td->urb->transfer_buffer_length -
-				TRB_LEN(le32_to_cpu(event->transfer_len));
+				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
 			if (td->urb->transfer_buffer_length <
 					td->urb->actual_length) {
 				xhci_warn(xhci, "HC gave bad length "
 						"of %d bytes left\n",
-					TRB_LEN(le32_to_cpu(event->transfer_len)));
+					EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
 				td->urb->actual_length = 0;
 				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
 					*status = -EREMOTEIO;
@@ -2280,7 +2282,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		if (trb_comp_code != COMP_STOP_INVAL)
 			td->urb->actual_length +=
 				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
-				TRB_LEN(le32_to_cpu(event->transfer_len));
+				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
 	}
 
 	return finish_td(xhci, td, event_trb, event, ep, status, false);
@@ -2368,7 +2370,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	 * transfer type
 	 */
 	case COMP_SUCCESS:
-		if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
+		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
 			break;
 		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
 			trb_comp_code = COMP_SHORT_TX;
@@ -2461,14 +2463,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		 * TD list.
 		 */
 		if (list_empty(&ep_ring->td_list)) {
-			xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
-					"with no TDs queued?\n",
-				  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
-				  ep_index);
-			xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
-				 (le32_to_cpu(event->flags) &
-				  TRB_TYPE_BITMASK)>>10);
-			xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+			/*
+			 * A stopped endpoint may generate an extra completion
+			 * event if the device was suspended.  Don't print
+			 * warnings.
+			 */
+			if (!(trb_comp_code == COMP_STOP ||
+						trb_comp_code == COMP_STOP_INVAL)) {
+				xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+						TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+						ep_index);
+				xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+						(le32_to_cpu(event->flags) &
+						 TRB_TYPE_BITMASK)>>10);
+				xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+			}
 			if (ep->skip) {
 				ep->skip = false;
 				xhci_dbg(xhci, "td_list is empty while skip "
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f1f01a834ba7..53b8f89a0b1c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -350,7 +350,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
 	 * generate interrupts.  Don't even try to enable MSI.
 	 */
 	if (xhci->quirks & XHCI_BROKEN_MSI)
-		return 0;
+		goto legacy_irq;
 
 	/* unregister the legacy interrupt */
 	if (hcd->irq)
@@ -371,6 +371,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
 		return -EINVAL;
 	}
 
+ legacy_irq:
 	/* fall back to legacy interrupt*/
 	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
 			hcd->irq_descr, hcd);
@@ -3778,6 +3779,28 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	return 0;
 }
 
+/*
+ * Transfer the port index into real index in the HW port status
+ * registers. Calculate offset between the port's PORTSC register
+ * and port status base. Divide the number of per port register
+ * to get the real index. The raw port number bases 1.
+ */
+int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	__le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
+	__le32 __iomem *addr;
+	int raw_port;
+
+	if (hcd->speed != HCD_USB3)
+		addr = xhci->usb2_ports[port1 - 1];
+	else
+		addr = xhci->usb3_ports[port1 - 1];
+
+	raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
+	return raw_port;
+}
+
 #ifdef CONFIG_USB_SUSPEND
 
 /* BESL to HIRD Encoding array for USB2 LPM */
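The new xhci_find_raw_port_number() relies on each root-hub port owning NUM_PORT_REGS consecutive 32-bit operational registers starting at port_status_base, so pointer subtraction recovers a 0-based port index and +1 makes it 1-based. A userspace model of that arithmetic (the register layout here is illustrative):

/* Userspace model of the pointer arithmetic in xhci_find_raw_port_number(). */
#include <stdio.h>
#include <stdint.h>

#define NUM_PORT_REGS 4			/* registers per port in the op space */

int main(void)
{
	uint32_t op_regs[16 * NUM_PORT_REGS];
	uint32_t *base = op_regs;	/* stands in for port_status_base */
	/* the PORTSC register of the third port, as the port arrays hold */
	uint32_t *addr = base + 2 * NUM_PORT_REGS;

	int raw_port = (int)((addr - base) / NUM_PORT_REGS + 1);
	printf("raw port = %d\n", raw_port);	/* prints 3 */
	return 0;
}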
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f791bd0aee6c..63582719e0fb 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -206,8 +206,8 @@ struct xhci_op_regs {
 /* bits 12:31 are reserved (and should be preserved on writes). */
 
 /* IMAN - Interrupt Management Register */
-#define IMAN_IP		(1 << 1)
-#define IMAN_IE		(1 << 0)
+#define IMAN_IE		(1 << 1)
+#define IMAN_IP		(1 << 0)
 
 /* USBSTS - USB status - status bitmasks */
 /* HC not running - set to 1 when run/stop bit is cleared. */
@@ -972,6 +972,10 @@ struct xhci_transfer_event {
 	__le32	flags;
 };
 
+/* Transfer event TRB length bit mask */
+/* bits 0:23 */
+#define	EVENT_TRB_LEN(p)		((p) & 0xffffff)
+
 /** Transfer Event bit fields **/
 #define	TRB_TO_EP_ID(p)	(((p) >> 16) & 0x1f)
 
@@ -1829,6 +1833,7 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
 		char *buf, u16 wLength);
 int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
+int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
 
 #ifdef CONFIG_PM
 int xhci_bus_suspend(struct usb_hcd *hcd);
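The TRB_LEN() to EVENT_TRB_LEN() conversion in xhci-ring.c above matters because on transfer event TRBs the length occupies bits 0:23 while the upper bits carry other fields; the narrower queue-side mask (bits 0:16) would silently truncate lengths of 128 KB or more. A small sketch of the difference:

/* Sketch: event-side lengths need the wider 24-bit mask. */
#include <stdio.h>
#include <stdint.h>

#define TRB_LEN(p)		((p) & 0x1ffff)		/* bits 0:16, queue-side */
#define EVENT_TRB_LEN(p)	((p) & 0xffffff)	/* bits 0:23, event-side */

int main(void)
{
	/* completion code 1 (success) in the top byte, 200000-byte length */
	uint32_t transfer_len = (1u << 24) | 200000u;

	printf("TRB_LEN:       %u\n", TRB_LEN(transfer_len));	    /* 68928, truncated */
	printf("EVENT_TRB_LEN: %u\n", EVENT_TRB_LEN(transfer_len)); /* 200000 */
	return 0;
}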
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 7c71769d71ff..41613a2b35e8 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -327,7 +327,7 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
 	u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
 	int err;
 
-	err = musb->int_usb & USB_INTR_VBUSERROR;
+	err = musb->int_usb & MUSB_INTR_VBUSERROR;
 	if (err) {
 		/*
 		 * The Mentor core doesn't debounce VBUS as needed
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index be18537c5f14..83eddedcd9be 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -141,7 +141,9 @@ static inline void map_dma_buffer(struct musb_request *request,
 static inline void unmap_dma_buffer(struct musb_request *request,
 				struct musb *musb)
 {
-	if (!is_buffer_mapped(request))
+	struct musb_ep *musb_ep = request->ep;
+
+	if (!is_buffer_mapped(request) || !musb_ep->dma)
 		return;
 
 	if (request->request.dma == DMA_ADDR_INVALID) {
@@ -195,7 +197,10 @@ __acquires(ep->musb->lock)
 
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	unmap_dma_buffer(req, musb);
+
+	if (!dma_mapping_error(&musb->g.dev, request->dma))
+		unmap_dma_buffer(req, musb);
+
 	if (request->status == 0)
 		dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
 			ep->end_point.name, request,
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 65217a590068..90549382eba5 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -38,6 +38,7 @@ config USB_ISP1301
 	tristate "NXP ISP1301 USB transceiver support"
 	depends on USB || USB_GADGET
 	depends on I2C
+	select USB_OTG_UTILS
 	help
 	  Say Y here to add support for the NXP ISP1301 USB transceiver driver.
 	  This chip is typically used as USB transceiver for USB host, gadget
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index cbd904b8fba5..4775f8209e55 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -62,7 +62,6 @@ static int is_irda(struct usb_serial *serial)
 }
 
 struct ark3116_private {
-	wait_queue_head_t	delta_msr_wait;
 	struct async_icount	icount;
 	int			irda;	/* 1 for irda device */
 
@@ -146,7 +145,6 @@ static int ark3116_port_probe(struct usb_serial_port *port)
 	if (!priv)
 		return -ENOMEM;
 
-	init_waitqueue_head(&priv->delta_msr_wait);
 	mutex_init(&priv->hw_lock);
 	spin_lock_init(&priv->status_lock);
 
@@ -456,10 +454,14 @@ static int ark3116_ioctl(struct tty_struct *tty,
 	case TIOCMIWAIT:
 		for (;;) {
 			struct async_icount prev = priv->icount;
-			interruptible_sleep_on(&priv->delta_msr_wait);
+			interruptible_sleep_on(&port->delta_msr_wait);
 			/* see if a signal did it */
 			if (signal_pending(current))
 				return -ERESTARTSYS;
+
+			if (port->serial->disconnected)
+				return -EIO;
+
 			if ((prev.rng == priv->icount.rng) &&
 			    (prev.dsr == priv->icount.dsr) &&
 			    (prev.dcd == priv->icount.dcd) &&
@@ -580,7 +582,7 @@ static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr)
 			priv->icount.dcd++;
 		if (msr & UART_MSR_TERI)
 			priv->icount.rng++;
-		wake_up_interruptible(&priv->delta_msr_wait);
+		wake_up_interruptible(&port->delta_msr_wait);
 	}
 }
 
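This ark3116 change is the first of several identical conversions below (ch341, cypress_m8, f81232, ftdi_sio, io_edgeport, io_ti): the per-driver wait queue moves into struct usb_serial_port as delta_msr_wait so the USB-serial core can wake sleepers on disconnect, and every TIOCMIWAIT loop re-checks port->serial->disconnected after waking, returning -EIO instead of sleeping forever on a gone device. A userspace sketch of that wake-and-recheck shape, with a condition variable standing in for the kernel wait queue (assumed simplification, not the kernel API):

/* Userspace sketch of the TIOCMIWAIT wake-and-recheck pattern. */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t delta_msr_wait = PTHREAD_COND_INITIALIZER;
static int msr_events;		/* bumped by the "interrupt" path */
static int disconnected;	/* set once the device goes away */

static int wait_for_msr_change(void)
{
	int ret = 0, seen;

	pthread_mutex_lock(&lock);
	seen = msr_events;
	while (msr_events == seen && !disconnected)
		pthread_cond_wait(&delta_msr_wait, &lock);
	if (disconnected)
		ret = -5;	/* the drivers return -EIO here */
	pthread_mutex_unlock(&lock);
	return ret;
}

static void on_disconnect(void)
{
	pthread_mutex_lock(&lock);
	disconnected = 1;
	pthread_cond_broadcast(&delta_msr_wait);	/* wake every sleeper */
	pthread_mutex_unlock(&lock);
}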
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index d255f66e708e..07d4650a32ab 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -80,7 +80,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
 
 struct ch341_private {
 	spinlock_t lock; /* access lock */
-	wait_queue_head_t delta_msr_wait; /* wait queue for modem status */
 	unsigned baud_rate; /* set baud rate */
 	u8 line_control; /* set line control value RTS/DTR */
 	u8 line_status; /* active status of modem control inputs */
@@ -252,7 +251,6 @@ static int ch341_port_probe(struct usb_serial_port *port)
 		return -ENOMEM;
 
 	spin_lock_init(&priv->lock);
-	init_waitqueue_head(&priv->delta_msr_wait);
 	priv->baud_rate = DEFAULT_BAUD_RATE;
 	priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
 
@@ -298,7 +296,7 @@ static void ch341_dtr_rts(struct usb_serial_port *port, int on)
 	priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR);
 	spin_unlock_irqrestore(&priv->lock, flags);
 	ch341_set_handshake(port->serial->dev, priv->line_control);
-	wake_up_interruptible(&priv->delta_msr_wait);
+	wake_up_interruptible(&port->delta_msr_wait);
 }
 
 static void ch341_close(struct usb_serial_port *port)
@@ -491,7 +489,7 @@ static void ch341_read_int_callback(struct urb *urb)
 			tty_kref_put(tty);
 		}
 
-		wake_up_interruptible(&priv->delta_msr_wait);
+		wake_up_interruptible(&port->delta_msr_wait);
 	}
 
 exit:
@@ -517,11 +515,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	while (!multi_change) {
-		interruptible_sleep_on(&priv->delta_msr_wait);
+		interruptible_sleep_on(&port->delta_msr_wait);
 		/* see if a signal did it */
 		if (signal_pending(current))
 			return -ERESTARTSYS;
 
+		if (port->serial->disconnected)
+			return -EIO;
+
 		spin_lock_irqsave(&priv->lock, flags);
 		status = priv->line_status;
 		multi_change = priv->multi_status_change;
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 8efa19d0e9fb..ba7352e4187e 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -111,7 +111,6 @@ struct cypress_private {
 	int baud_rate;			/* stores current baud rate in
 					   integer form */
 	int isthrottled;		/* if throttled, discard reads */
-	wait_queue_head_t delta_msr_wait;	/* used for TIOCMIWAIT */
 	char prev_status, diff_status;	/* used for TIOCMIWAIT */
 	/* we pass a pointer to this as the argument sent to
 	   cypress_set_termios old_termios */
@@ -449,7 +448,6 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
 		kfree(priv);
 		return -ENOMEM;
 	}
-	init_waitqueue_head(&priv->delta_msr_wait);
 
 	usb_reset_configuration(serial->dev);
 
@@ -868,12 +866,16 @@ static int cypress_ioctl(struct tty_struct *tty,
 	switch (cmd) {
 	/* This code comes from drivers/char/serial.c and ftdi_sio.c */
 	case TIOCMIWAIT:
-		while (priv != NULL) {
-			interruptible_sleep_on(&priv->delta_msr_wait);
+		for (;;) {
+			interruptible_sleep_on(&port->delta_msr_wait);
 			/* see if a signal did it */
 			if (signal_pending(current))
 				return -ERESTARTSYS;
-			else {
+
+			if (port->serial->disconnected)
+				return -EIO;
+
+			{
 				char diff = priv->diff_status;
 				if (diff == 0)
 					return -EIO; /* no change => error */
@@ -1187,7 +1189,7 @@ static void cypress_read_int_callback(struct urb *urb)
 	if (priv->current_status != priv->prev_status) {
 		priv->diff_status |= priv->current_status ^
 			priv->prev_status;
-		wake_up_interruptible(&priv->delta_msr_wait);
+		wake_up_interruptible(&port->delta_msr_wait);
 		priv->prev_status = priv->current_status;
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index b1b2dc64b50b..a172ad5c5ce8 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -47,7 +47,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
 
 struct f81232_private {
 	spinlock_t lock;
-	wait_queue_head_t delta_msr_wait;
 	u8 line_control;
 	u8 line_status;
 };
@@ -111,7 +110,7 @@ static void f81232_process_read_urb(struct urb *urb)
 	line_status = priv->line_status;
 	priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
 	spin_unlock_irqrestore(&priv->lock, flags);
-	wake_up_interruptible(&priv->delta_msr_wait);
+	wake_up_interruptible(&port->delta_msr_wait);
 
 	if (!urb->actual_length)
 		return;
@@ -256,11 +255,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	while (1) {
-		interruptible_sleep_on(&priv->delta_msr_wait);
+		interruptible_sleep_on(&port->delta_msr_wait);
 		/* see if a signal did it */
 		if (signal_pending(current))
 			return -ERESTARTSYS;
 
+		if (port->serial->disconnected)
+			return -EIO;
+
 		spin_lock_irqsave(&priv->lock, flags);
 		status = priv->line_status;
 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -322,7 +324,6 @@ static int f81232_port_probe(struct usb_serial_port *port)
 		return -ENOMEM;
 
 	spin_lock_init(&priv->lock);
-	init_waitqueue_head(&priv->delta_msr_wait);
 
 	usb_set_serial_port_data(port, priv);
 
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index edd162df49ca..9886180e45f1 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -69,9 +69,7 @@ struct ftdi_private {
 	int flags;		/* some ASYNC_xxxx flags are supported */
 	unsigned long last_dtr_rts;	/* saved modem control outputs */
 	struct async_icount icount;
-	wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
 	char prev_status;        /* Used for TIOCMIWAIT */
-	bool dev_gone;        /* Used to abort TIOCMIWAIT */
 	char transmit_empty;	/* If transmitter is empty or not */
 	__u16 interface;	/* FT2232C, FT2232H or FT4232H port interface
 				   (0 for FT232/245) */
@@ -642,6 +640,7 @@ static struct usb_device_id id_table_combined [] = {
 	{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
 	{ USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
 	{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
+	{ USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) },
 	{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
 	{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
 	{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
@@ -1691,10 +1690,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
 
 	kref_init(&priv->kref);
 	mutex_init(&priv->cfg_lock);
-	init_waitqueue_head(&priv->delta_msr_wait);
 
 	priv->flags = ASYNC_LOW_LATENCY;
-	priv->dev_gone = false;
 
 	if (quirk && quirk->port_probe)
 		quirk->port_probe(priv);
@@ -1840,8 +1837,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
 {
 	struct ftdi_private *priv = usb_get_serial_port_data(port);
 
-	priv->dev_gone = true;
-	wake_up_interruptible_all(&priv->delta_msr_wait);
+	wake_up_interruptible(&port->delta_msr_wait);
 
 	remove_sysfs_attrs(port);
 
@@ -1989,7 +1985,7 @@ static int ftdi_process_packet(struct usb_serial_port *port,
 		if (diff_status & FTDI_RS0_RLSD)
 			priv->icount.dcd++;
 
-		wake_up_interruptible_all(&priv->delta_msr_wait);
+		wake_up_interruptible(&port->delta_msr_wait);
 		priv->prev_status = status;
 	}
 
@@ -2440,11 +2436,15 @@ static int ftdi_ioctl(struct tty_struct *tty,
 	 */
 	case TIOCMIWAIT:
 		cprev = priv->icount;
-		while (!priv->dev_gone) {
-			interruptible_sleep_on(&priv->delta_msr_wait);
+		for (;;) {
+			interruptible_sleep_on(&port->delta_msr_wait);
 			/* see if a signal did it */
 			if (signal_pending(current))
 				return -ERESTARTSYS;
+
+			if (port->serial->disconnected)
+				return -EIO;
+
 			cnow = priv->icount;
 			if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
 			    ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
@@ -2454,8 +2454,6 @@ static int ftdi_ioctl(struct tty_struct *tty,
 			}
 			cprev = cnow;
 		}
-		return -EIO;
-		break;
 	case TIOCSERGETLSR:
 		return get_lsr_info(port, (struct serial_struct __user *)arg);
 		break;
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 9d359e189a64..e79861eeed4c 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -584,6 +584,13 @@
 #define CONTEC_COM1USBH_PID	0x8311	/* COM-1(USB)H */
 
 /*
+ * Mitsubishi Electric Corp. (http://www.meau.com)
+ * Submitted by Konstantin Holoborodko
+ */
+#define MITSUBISHI_VID		0x06D3
+#define MITSUBISHI_FXUSB_PID	0x0284 /* USB/RS422 converters: FX-USB-AW/-BD */
+
+/*
  * Definitions for B&B Electronics products.
  */
 #define BANDB_VID		0x0856	/* B&B Electronics Vendor ID */
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 1a07b12ef341..81caf5623ee2 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -956,10 +956,7 @@ static void garmin_close(struct usb_serial_port *port)
 	if (!serial)
 		return;
 
-	mutex_lock(&port->serial->disc_mutex);
-
-	if (!port->serial->disconnected)
-		garmin_clear(garmin_data_p);
+	garmin_clear(garmin_data_p);
 
 	/* shutdown our urbs */
 	usb_kill_urb(port->read_urb);
@@ -968,8 +965,6 @@ static void garmin_close(struct usb_serial_port *port)
 	/* keep reset state so we know that we must start a new session */
 	if (garmin_data_p->state != STATE_RESET)
 		garmin_data_p->state = STATE_DISCONNECTED;
-
-	mutex_unlock(&port->serial->disc_mutex);
 }
 
 
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index b00e5cbf741f..efd8b978128c 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -110,7 +110,6 @@ struct edgeport_port {
 	wait_queue_head_t	wait_chase;		/* for handling sleeping while waiting for chase to finish */
 	wait_queue_head_t	wait_open;		/* for handling sleeping while waiting for open to finish */
 	wait_queue_head_t	wait_command;		/* for handling sleeping while waiting for command to finish */
-	wait_queue_head_t	delta_msr_wait;		/* for handling sleeping while waiting for msr change to happen */
 
 	struct async_icount	icount;
 	struct usb_serial_port	*port;			/* loop back to the owner of this object */
@@ -884,7 +883,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
 	/* initialize our wait queues */
 	init_waitqueue_head(&edge_port->wait_open);
 	init_waitqueue_head(&edge_port->wait_chase);
-	init_waitqueue_head(&edge_port->delta_msr_wait);
 	init_waitqueue_head(&edge_port->wait_command);
 
 	/* initialize our icount structure */
@@ -1669,13 +1667,17 @@ static int edge_ioctl(struct tty_struct *tty,
 		dev_dbg(&port->dev, "%s (%d) TIOCMIWAIT\n", __func__, port->number);
 		cprev = edge_port->icount;
 		while (1) {
-			prepare_to_wait(&edge_port->delta_msr_wait,
+			prepare_to_wait(&port->delta_msr_wait,
 						&wait, TASK_INTERRUPTIBLE);
 			schedule();
-			finish_wait(&edge_port->delta_msr_wait, &wait);
+			finish_wait(&port->delta_msr_wait, &wait);
 			/* see if a signal did it */
 			if (signal_pending(current))
 				return -ERESTARTSYS;
+
+			if (port->serial->disconnected)
+				return -EIO;
+
 			cnow = edge_port->icount;
 			if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
 			    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
@@ -2051,7 +2053,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr)
 			icount->dcd++;
 		if (newMsr & EDGEPORT_MSR_DELTA_RI)
 			icount->rng++;
-		wake_up_interruptible(&edge_port->delta_msr_wait);
+		wake_up_interruptible(&edge_port->port->delta_msr_wait);
 	}
 
 	/* Save the new modem status */
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index c23776679f70..7777172206de 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -87,9 +87,6 @@ struct edgeport_port {
 	int close_pending;
 	int lsr_event;
 	struct async_icount icount;
-	wait_queue_head_t delta_msr_wait;	/* for handling sleeping while
-						   waiting for msr change to
-						   happen */
 	struct edgeport_serial *edge_serial;
 	struct usb_serial_port *port;
 	__u8 bUartMode;		/* Port type, 0: RS232, etc. */
@@ -1459,7 +1456,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr)
 			icount->dcd++;
 		if (msr & EDGEPORT_MSR_DELTA_RI)
 			icount->rng++;
-		wake_up_interruptible(&edge_port->delta_msr_wait);
+		wake_up_interruptible(&edge_port->port->delta_msr_wait);
 	}
 
 	/* Save the new modem status */
@@ -1754,7 +1751,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
 	dev = port->serial->dev;
 
 	memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount));
-	init_waitqueue_head(&edge_port->delta_msr_wait);
 
 	/* turn off loopback */
 	status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0);
@@ -2434,10 +2430,14 @@ static int edge_ioctl(struct tty_struct *tty,
 		dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__);
 		cprev = edge_port->icount;
 		while (1) {
-			interruptible_sleep_on(&edge_port->delta_msr_wait);
+			interruptible_sleep_on(&port->delta_msr_wait);
 			/* see if a signal did it */
 			if (signal_pending(current))
 				return -ERESTARTSYS;
+
+			if (port->serial->disconnected)
+				return -EIO;
+
 			cnow = edge_port->icount;
 			if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
 			    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
@@ -2649,6 +2649,7 @@ static struct usb_serial_driver edgeport_2port_device = {
 	.set_termios		= edge_set_termios,
 	.tiocmget		= edge_tiocmget,
 	.tiocmset		= edge_tiocmset,
+	.get_icount		= edge_get_icount,
 	.write			= edge_write,
 	.write_room		= edge_write_room,
 	.chars_in_buffer	= edge_chars_in_buffer,
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index a64d420f687b..06d5a60be2c4 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -114,8 +114,6 @@ struct mct_u232_private {
 	unsigned char	     last_msr;	/* Modem Status Register */
 	unsigned int	     rx_flags;	/* Throttling flags */
 	struct async_icount  icount;
-	wait_queue_head_t    msr_wait;	/* for handling sleeping while waiting
-					   for msr change to happen */
 };
 
 #define THROTTLED	0x01
@@ -409,7 +407,6 @@ static int mct_u232_port_probe(struct usb_serial_port *port)
 		return -ENOMEM;
 
 	spin_lock_init(&priv->lock);
-	init_waitqueue_head(&priv->msr_wait);
 
 	usb_set_serial_port_data(port, priv);
 
@@ -601,7 +598,7 @@ static void mct_u232_read_int_callback(struct urb *urb)
 		tty_kref_put(tty);
 	}
 #endif
-	wake_up_interruptible(&priv->msr_wait);
+	wake_up_interruptible(&port->delta_msr_wait);
 	spin_unlock_irqrestore(&priv->lock, flags);
 exit:
 	retval = usb_submit_urb(urb, GFP_ATOMIC);
@@ -810,13 +807,17 @@ static int mct_u232_ioctl(struct tty_struct *tty,
 		cprev = mct_u232_port->icount;
 		spin_unlock_irqrestore(&mct_u232_port->lock, flags);
 		for ( ; ; ) {
-			prepare_to_wait(&mct_u232_port->msr_wait,
+			prepare_to_wait(&port->delta_msr_wait,
 					&wait, TASK_INTERRUPTIBLE);
 			schedule();
-			finish_wait(&mct_u232_port->msr_wait, &wait);
+			finish_wait(&port->delta_msr_wait, &wait);
 			/* see if a signal did it */
 			if (signal_pending(current))
 				return -ERESTARTSYS;
+
+			if (port->serial->disconnected)
+				return -EIO;
+
 			spin_lock_irqsave(&mct_u232_port->lock, flags);
 			cnow = mct_u232_port->icount;
 			spin_unlock_irqrestore(&mct_u232_port->lock, flags);
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 809fb329eca5..b8051fa61911 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -219,7 +219,6 @@ struct moschip_port {
 	char open;
 	char open_ports;
 	wait_queue_head_t wait_chase;		/* for handling sleeping while waiting for chase to finish */
-	wait_queue_head_t delta_msr_wait;	/* for handling sleeping while waiting for msr change to happen */
 	int delta_msr_cond;
 	struct async_icount icount;
 	struct usb_serial_port *port;		/* loop back to the owner of this object */
@@ -423,6 +422,9 @@ static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr)
 			icount->rng++;
 			smp_wmb();
 		}
+
+		mos7840_port->delta_msr_cond = 1;
+		wake_up_interruptible(&port->port->delta_msr_wait);
 	}
 }
 
@@ -1127,7 +1129,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
 
 	/* initialize our wait queues */
 	init_waitqueue_head(&mos7840_port->wait_chase);
-	init_waitqueue_head(&mos7840_port->delta_msr_wait);
 
 	/* initialize our icount structure */
 	memset(&(mos7840_port->icount), 0x00, sizeof(mos7840_port->icount));
@@ -2017,8 +2018,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
 			mos7840_port->read_urb_busy = false;
 		}
 	}
-	wake_up(&mos7840_port->delta_msr_wait);
-	mos7840_port->delta_msr_cond = 1;
 	dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is End %x\n", __func__,
 		mos7840_port->shadowLCR);
 }
@@ -2219,13 +2218,18 @@ static int mos7840_ioctl(struct tty_struct *tty,
 		while (1) {
 			/* interruptible_sleep_on(&mos7840_port->delta_msr_wait); */
 			mos7840_port->delta_msr_cond = 0;
-			wait_event_interruptible(mos7840_port->delta_msr_wait,
-						 (mos7840_port->
+			wait_event_interruptible(port->delta_msr_wait,
						 (port->serial->disconnected ||
+						  mos7840_port->
 						  delta_msr_cond == 1));
 
 			/* see if a signal did it */
 			if (signal_pending(current))
 				return -ERESTARTSYS;
+
+			if (port->serial->disconnected)
+				return -EIO;
+
 			cnow = mos7840_port->icount;
 			smp_rmb();
 			if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
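
mos7840 above, and oti6858, quatech2 and ssu100 below, use the wait_event_interruptible() form instead, and there the disconnect test has to appear twice: once inside the wakeup condition, so a wakeup issued at or after disconnect actually terminates the wait instead of putting the task back to sleep, and once after the wait returns, before driver state is dereferenced. A hedged sketch of that shape (demo_* names invented):

	#include <linux/sched.h>
	#include <linux/usb/serial.h>
	#include <linux/wait.h>

	/* Hypothetical flag set by the driver's status-change handler. */
	struct demo_priv {
		int delta_msr_cond;
	};

	static int demo_wait(struct usb_serial_port *port, struct demo_priv *priv)
	{
		priv->delta_msr_cond = 0;
		wait_event_interruptible(port->delta_msr_wait,
					 port->serial->disconnected ||
					 priv->delta_msr_cond == 1);

		if (signal_pending(current))
			return -ERESTARTSYS;

		/* Re-check before touching anything owned by the driver. */
		if (port->serial->disconnected)
			return -EIO;

		return 0;	/* the modem status really changed */
	}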
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index a958fd41b5b3..87c71ccfee87 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -188,7 +188,6 @@ struct oti6858_private {
 	u8 setup_done;
 	struct delayed_work delayed_setup_work;
 
-	wait_queue_head_t intr_wait;
 	struct usb_serial_port *port;   /* USB port with which associated */
 };
 
@@ -339,7 +338,6 @@ static int oti6858_port_probe(struct usb_serial_port *port)
 		return -ENOMEM;
 
 	spin_lock_init(&priv->lock);
-	init_waitqueue_head(&priv->intr_wait);
 	priv->port = port;
 	INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line);
 	INIT_DELAYED_WORK(&priv->delayed_write_work, send_data);
@@ -664,11 +662,15 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	while (1) {
-		wait_event_interruptible(priv->intr_wait,
+		wait_event_interruptible(port->delta_msr_wait,
+				port->serial->disconnected ||
 				priv->status.pin_state != prev);
 		if (signal_pending(current))
 			return -ERESTARTSYS;
 
+		if (port->serial->disconnected)
+			return -EIO;
+
 		spin_lock_irqsave(&priv->lock, flags);
 		status = priv->status.pin_state & PIN_MASK;
 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -763,7 +765,7 @@ static void oti6858_read_int_callback(struct urb *urb)
 
 	if (!priv->transient) {
 		if (xs->pin_state != priv->status.pin_state)
-			wake_up_interruptible(&priv->intr_wait);
+			wake_up_interruptible(&port->delta_msr_wait);
 		memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE);
 	}
 
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 54adc9125e5c..3b10018d89a3 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -139,7 +139,6 @@ struct pl2303_serial_private {
 
 struct pl2303_private {
 	spinlock_t lock;
-	wait_queue_head_t delta_msr_wait;
 	u8 line_control;
 	u8 line_status;
 };
@@ -233,7 +232,6 @@ static int pl2303_port_probe(struct usb_serial_port *port)
 		return -ENOMEM;
 
 	spin_lock_init(&priv->lock);
-	init_waitqueue_head(&priv->delta_msr_wait);
 
 	usb_set_serial_port_data(port, priv);
 
@@ -607,11 +605,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	while (1) {
-		interruptible_sleep_on(&priv->delta_msr_wait);
+		interruptible_sleep_on(&port->delta_msr_wait);
 		/* see if a signal did it */
 		if (signal_pending(current))
 			return -ERESTARTSYS;
 
+		if (port->serial->disconnected)
+			return -EIO;
+
 		spin_lock_irqsave(&priv->lock, flags);
 		status = priv->line_status;
 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -719,7 +720,7 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
 	spin_unlock_irqrestore(&priv->lock, flags);
 	if (priv->line_status & UART_BREAK_ERROR)
 		usb_serial_handle_break(port);
-	wake_up_interruptible(&priv->delta_msr_wait);
+	wake_up_interruptible(&port->delta_msr_wait);
 
 	tty = tty_port_tty_get(&port->port);
 	if (!tty)
@@ -783,7 +784,7 @@ static void pl2303_process_read_urb(struct urb *urb)
 	line_status = priv->line_status;
 	priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
 	spin_unlock_irqrestore(&priv->lock, flags);
-	wake_up_interruptible(&priv->delta_msr_wait);
+	wake_up_interruptible(&port->delta_msr_wait);
 
 	if (!urb->actual_length)
 		return;
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index d643a4d4d770..75f125ddb0c9 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -128,7 +128,6 @@ struct qt2_port_private {
 	u8          shadowLSR;
 	u8          shadowMSR;
 
-	wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
 	struct async_icount icount;
 
 	struct usb_serial_port *port;
@@ -506,8 +505,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	while (1) {
-		wait_event_interruptible(priv->delta_msr_wait,
-					 ((priv->icount.rng != prev.rng) ||
+		wait_event_interruptible(port->delta_msr_wait,
+					 (port->serial->disconnected ||
+					  (priv->icount.rng != prev.rng) ||
 					  (priv->icount.dsr != prev.dsr) ||
 					  (priv->icount.dcd != prev.dcd) ||
 					  (priv->icount.cts != prev.cts)));
@@ -515,6 +515,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
 		if (signal_pending(current))
 			return -ERESTARTSYS;
 
+		if (port->serial->disconnected)
+			return -EIO;
+
 		spin_lock_irqsave(&priv->lock, flags);
 		cur = priv->icount;
 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -827,7 +830,6 @@ static int qt2_port_probe(struct usb_serial_port *port)
 
 	spin_lock_init(&port_priv->lock);
 	spin_lock_init(&port_priv->urb_lock);
-	init_waitqueue_head(&port_priv->delta_msr_wait);
 	port_priv->port = port;
 
 	port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -970,7 +972,7 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch)
 		if (newMSR & UART_MSR_TERI)
 			port_priv->icount.rng++;
 
-		wake_up_interruptible(&port_priv->delta_msr_wait);
+		wake_up_interruptible(&port->delta_msr_wait);
 	}
 }
 
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 91ff8e3bddbd..549ef68ff5fa 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -149,7 +149,6 @@ enum spcp8x5_type {
 struct spcp8x5_private {
 	spinlock_t		lock;
 	enum spcp8x5_type	type;
-	wait_queue_head_t	delta_msr_wait;
 	u8			line_control;
 	u8			line_status;
 };
@@ -179,7 +178,6 @@ static int spcp8x5_port_probe(struct usb_serial_port *port)
 		return -ENOMEM;
 
 	spin_lock_init(&priv->lock);
-	init_waitqueue_head(&priv->delta_msr_wait);
 	priv->type = type;
 
 	usb_set_serial_port_data(port , priv);
@@ -475,7 +473,7 @@ static void spcp8x5_process_read_urb(struct urb *urb)
 	priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
 	spin_unlock_irqrestore(&priv->lock, flags);
 	/* wake up the wait for termios */
-	wake_up_interruptible(&priv->delta_msr_wait);
+	wake_up_interruptible(&port->delta_msr_wait);
 
 	if (!urb->actual_length)
 		return;
@@ -526,12 +524,15 @@ static int spcp8x5_wait_modem_info(struct usb_serial_port *port,
 
 	while (1) {
 		/* wake up in bulk read */
-		interruptible_sleep_on(&priv->delta_msr_wait);
+		interruptible_sleep_on(&port->delta_msr_wait);
 
 		/* see if a signal did it */
 		if (signal_pending(current))
 			return -ERESTARTSYS;
 
+		if (port->serial->disconnected)
+			return -EIO;
+
 		spin_lock_irqsave(&priv->lock, flags);
 		status = priv->line_status;
 		spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index b57cf841c5b6..4b2a19757b4d 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -61,7 +61,6 @@ struct ssu100_port_private {
 	spinlock_t status_lock;
 	u8 shadowLSR;
 	u8 shadowMSR;
-	wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
 	struct async_icount icount;
 };
 
@@ -355,8 +354,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
 	spin_unlock_irqrestore(&priv->status_lock, flags);
 
 	while (1) {
-		wait_event_interruptible(priv->delta_msr_wait,
-					 ((priv->icount.rng != prev.rng) ||
+		wait_event_interruptible(port->delta_msr_wait,
+					 (port->serial->disconnected ||
+					  (priv->icount.rng != prev.rng) ||
 					  (priv->icount.dsr != prev.dsr) ||
 					  (priv->icount.dcd != prev.dcd) ||
 					  (priv->icount.cts != prev.cts)));
@@ -364,6 +364,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
 		if (signal_pending(current))
 			return -ERESTARTSYS;
 
+		if (port->serial->disconnected)
+			return -EIO;
+
 		spin_lock_irqsave(&priv->status_lock, flags);
 		cur = priv->icount;
 		spin_unlock_irqrestore(&priv->status_lock, flags);
@@ -445,7 +448,6 @@ static int ssu100_port_probe(struct usb_serial_port *port)
 		return -ENOMEM;
 
 	spin_lock_init(&priv->status_lock);
-	init_waitqueue_head(&priv->delta_msr_wait);
 
 	usb_set_serial_port_data(port, priv);
 
@@ -537,7 +539,7 @@ static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
 			priv->icount.dcd++;
 		if (msr & UART_MSR_TERI)
 			priv->icount.rng++;
-		wake_up_interruptible(&priv->delta_msr_wait);
+		wake_up_interruptible(&port->delta_msr_wait);
 	}
 }
 
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 39cb9b807c3c..73deb029fc05 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -74,7 +74,6 @@ struct ti_port {
 	int			tp_flags;
 	int			tp_closing_wait;/* in .01 secs */
 	struct async_icount	tp_icount;
-	wait_queue_head_t	tp_msr_wait;	/* wait for msr change */
 	wait_queue_head_t	tp_write_wait;
 	struct ti_device	*tp_tdev;
 	struct usb_serial_port	*tp_port;
@@ -432,7 +431,6 @@ static int ti_port_probe(struct usb_serial_port *port)
 	else
 		tport->tp_uart_base_addr = TI_UART2_BASE_ADDR;
 	tport->tp_closing_wait = closing_wait;
-	init_waitqueue_head(&tport->tp_msr_wait);
 	init_waitqueue_head(&tport->tp_write_wait);
 	if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE, GFP_KERNEL)) {
 		kfree(tport);
@@ -784,9 +782,13 @@ static int ti_ioctl(struct tty_struct *tty,
 		dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__);
 		cprev = tport->tp_icount;
 		while (1) {
-			interruptible_sleep_on(&tport->tp_msr_wait);
+			interruptible_sleep_on(&port->delta_msr_wait);
 			if (signal_pending(current))
 				return -ERESTARTSYS;
+
+			if (port->serial->disconnected)
+				return -EIO;
+
 			cnow = tport->tp_icount;
 			if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
 			    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
@@ -1392,7 +1394,7 @@ static void ti_handle_new_msr(struct ti_port *tport, __u8 msr)
 			icount->dcd++;
 		if (msr & TI_MSR_DELTA_RI)
 			icount->rng++;
-		wake_up_interruptible(&tport->tp_msr_wait);
+		wake_up_interruptible(&tport->tp_port->delta_msr_wait);
 		spin_unlock_irqrestore(&tport->tp_lock, flags);
 	}
 
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index a19ed74d770d..5d9b178484fd 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -151,6 +151,7 @@ static void destroy_serial(struct kref *kref)
 		}
 	}
 
+	usb_put_intf(serial->interface);
 	usb_put_dev(serial->dev);
 	kfree(serial);
 }
@@ -620,7 +621,7 @@ static struct usb_serial *create_serial(struct usb_device *dev,
 	}
 	serial->dev = usb_get_dev(dev);
 	serial->type = driver;
-	serial->interface = interface;
+	serial->interface = usb_get_intf(interface);
 	kref_init(&serial->kref);
 	mutex_init(&serial->disc_mutex);
 	serial->minor = SERIAL_TTY_NO_MINOR;
@@ -902,6 +903,7 @@ static int usb_serial_probe(struct usb_interface *interface,
 		port->port.ops = &serial_port_ops;
 		port->serial = serial;
 		spin_lock_init(&port->lock);
+		init_waitqueue_head(&port->delta_msr_wait);
 		/* Keep this for private driver use for the moment but
 		   should probably go away */
 		INIT_WORK(&port->work, usb_serial_port_work);
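
The three usb-serial core hunks carry the series: create_serial() now takes a reference on the USB interface that destroy_serial() drops at the last kref put, so serial->interface cannot dangle while a usb_serial object is still referenced, and every port gets the delta_msr_wait queue the driver patches above sleep on. The get/put discipline in isolation, on a stripped-down object with the same lifetime rules (demo_* names invented):

	#include <linux/kref.h>
	#include <linux/slab.h>
	#include <linux/usb.h>

	struct demo_serial {
		struct kref kref;
		struct usb_device *dev;
		struct usb_interface *interface;
	};

	static struct demo_serial *demo_create(struct usb_device *dev,
					       struct usb_interface *interface)
	{
		struct demo_serial *s = kzalloc(sizeof(*s), GFP_KERNEL);

		if (!s)
			return NULL;
		kref_init(&s->kref);
		s->dev = usb_get_dev(dev);		/* pin the device... */
		s->interface = usb_get_intf(interface);	/* ...and the interface */
		return s;
	}

	static void demo_release(struct kref *kref)
	{
		struct demo_serial *s = container_of(kref, struct demo_serial,
						     kref);

		usb_put_intf(s->interface);	/* balances usb_get_intf() */
		usb_put_dev(s->dev);		/* balances usb_get_dev() */
		kfree(s);
	}

Callers hold the object with kref_get(&s->kref) and drop it with kref_put(&s->kref, demo_release), so the interface reference is released exactly once, however the teardown races with disconnect.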
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index da04a074e790..1799335288bd 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -496,6 +496,13 @@ UNUSUAL_DEV(  0x04e8, 0x5122, 0x0000, 0x9999,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG),
 
+/* Added by Dmitry Artamonow <mad_soft@inbox.ru> */
+UNUSUAL_DEV(  0x04e8, 0x5136, 0x0000, 0x9999,
+		"Samsung",
+		"YP-Z3",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_MAX_SECTORS_64),
+
 /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
  * Device uses standards-violating 32-byte Bulk Command Block Wrappers and
  * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 964ff22bf281..aeb00fc2d3be 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -27,6 +27,7 @@
 #include <linux/pci.h>
 #include <linux/uaccess.h>
 #include <linux/vfio.h>
+#include <linux/slab.h>
 
 #include "vfio_pci_private.h"
 
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 3639371fa697..a96509187deb 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -22,6 +22,7 @@
 #include <linux/vfio.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <linux/slab.h>
 
 #include "vfio_pci_private.h"
 
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 959b1cd89e6a..ec6fb3fa59bb 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -339,7 +339,8 @@ static void handle_tx(struct vhost_net *net)
 			msg.msg_controllen = 0;
 			ubufs = NULL;
 		} else {
-			struct ubuf_info *ubuf = &vq->ubuf_info[head];
+			struct ubuf_info *ubuf;
+			ubuf = vq->ubuf_info + vq->upend_idx;
 
 			vq->heads[vq->upend_idx].len =
 				VHOST_DMA_IN_PROGRESS;
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 9951297b2427..2968b4934659 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -60,6 +60,15 @@ enum {
 	VHOST_SCSI_VQ_IO = 2,
 };
 
+/*
+ * VIRTIO_RING_F_EVENT_IDX seems broken. Not sure the bug is in
+ * kernel but disabling it helps.
+ * TODO: debug and remove the workaround.
+ */
+enum {
+	VHOST_SCSI_FEATURES = VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)
+};
+
 #define VHOST_SCSI_MAX_TARGET	256
 #define VHOST_SCSI_MAX_VQ	128
 
@@ -850,7 +859,7 @@ static int vhost_scsi_clear_endpoint(
 	for (index = 0; index < vs->dev.nvqs; ++index) {
 		if (!vhost_vq_access_ok(&vs->vqs[index])) {
 			ret = -EFAULT;
-			goto err;
+			goto err_dev;
 		}
 	}
 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
@@ -860,10 +869,11 @@ static int vhost_scsi_clear_endpoint(
 		if (!tv_tpg)
 			continue;
 
+		mutex_lock(&tv_tpg->tv_tpg_mutex);
 		tv_tport = tv_tpg->tport;
 		if (!tv_tport) {
 			ret = -ENODEV;
-			goto err;
+			goto err_tpg;
 		}
 
 		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
@@ -872,16 +882,19 @@ static int vhost_scsi_clear_endpoint(
 				tv_tport->tport_name, tv_tpg->tport_tpgt,
 				t->vhost_wwpn, t->vhost_tpgt);
 			ret = -EINVAL;
-			goto err;
+			goto err_tpg;
 		}
 		tv_tpg->tv_tpg_vhost_count--;
 		vs->vs_tpg[target] = NULL;
 		vs->vs_endpoint = false;
+		mutex_unlock(&tv_tpg->tv_tpg_mutex);
 	}
 	mutex_unlock(&vs->dev.mutex);
 	return 0;
 
-err:
+err_tpg:
+	mutex_unlock(&tv_tpg->tv_tpg_mutex);
+err_dev:
 	mutex_unlock(&vs->dev.mutex);
 	return ret;
 }
@@ -937,11 +950,12 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
 
 	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
 		vhost_scsi_flush_vq(vs, i);
+	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
 }
 
 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 {
-	if (features & ~VHOST_FEATURES)
+	if (features & ~VHOST_SCSI_FEATURES)
 		return -EOPNOTSUPP;
 
 	mutex_lock(&vs->dev.mutex);
@@ -987,7 +1001,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 			return -EFAULT;
 		return 0;
 	case VHOST_GET_FEATURES:
-		features = VHOST_FEATURES;
+		features = VHOST_SCSI_FEATURES;
 		if (copy_to_user(featurep, &features, sizeof features))
 			return -EFAULT;
 		return 0;
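
The clear-endpoint fix above is a standard two-label unwind: once the per-tpg mutex joins the device mutex, every later failure has to release both, in reverse order, while earlier failures skip straight to the outer unlock. Reduced to its shape, with hypothetical checks standing in for the real validation:

	#include <linux/errno.h>
	#include <linux/mutex.h>

	static bool demo_dev_ok(void) { return true; }	/* hypothetical */
	static bool demo_tpg_ok(void) { return true; }	/* hypothetical */

	static int demo_clear_endpoint(struct mutex *dev_mutex,
				       struct mutex *tpg_mutex)
	{
		int ret;

		mutex_lock(dev_mutex);
		if (!demo_dev_ok()) {
			ret = -EFAULT;
			goto err_dev;		/* tpg mutex not taken yet */
		}

		mutex_lock(tpg_mutex);
		if (!demo_tpg_ok()) {
			ret = -ENODEV;
			goto err_tpg;		/* both locks held */
		}
		mutex_unlock(tpg_mutex);

		mutex_unlock(dev_mutex);
		return 0;

	err_tpg:
		mutex_unlock(tpg_mutex);
	err_dev:
		mutex_unlock(dev_mutex);
		return ret;
	}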
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 12cf5f31ee8f..025428e04c33 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -422,17 +422,22 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
 			= var->bits_per_pixel;
 		break;
 	case 16:
+		/* Older SOCs use IBGR:555 rather than BGR:565. */
+		if (sinfo->have_intensity_bit)
+			var->green.length = 5;
+		else
+			var->green.length = 6;
+
 		if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
-			/* RGB:565 mode */
-			var->red.offset = 11;
+			/* RGB:5X5 mode */
+			var->red.offset = var->green.length + 5;
 			var->blue.offset = 0;
 		} else {
-			/* BGR:565 mode */
+			/* BGR:5X5 mode */
 			var->red.offset = 0;
-			var->blue.offset = 11;
+			var->blue.offset = var->green.length + 5;
 		}
 		var->green.offset = 5;
-		var->green.length = 6;
 		var->red.length = var->blue.length = 5;
 		break;
 	case 32:
@@ -679,8 +684,7 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red,
 
 	case FB_VISUAL_PSEUDOCOLOR:
 		if (regno < 256) {
-			if (cpu_is_at91sam9261() || cpu_is_at91sam9263()
-			    || cpu_is_at91sam9rl()) {
+			if (sinfo->have_intensity_bit) {
 				/* old style I+BGR:555 */
 				val  = ((red   >> 11) & 0x001f);
 				val |= ((green >>  6) & 0x03e0);
@@ -870,6 +874,10 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
 	}
 	sinfo->info = info;
 	sinfo->pdev = pdev;
+	if (cpu_is_at91sam9261() || cpu_is_at91sam9263() ||
+	    cpu_is_at91sam9rl()) {
+		sinfo->have_intensity_bit = true;
+	}
 
 	strcpy(info->fix.id, sinfo->pdev->name);
 	info->flags = ATMEL_LCDFB_FBINFO_DEFAULT;
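
The 16bpp hunk replaces hard-coded 565 offsets with offsets derived from the green length, so one branch serves both the older IBGR:555 SOCs and plain BGR:565. Worked through for both cases (a userspace check of the same arithmetic, not driver code):

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		for (int intensity = 0; intensity <= 1; intensity++) {
			unsigned int green_len = intensity ? 5 : 6;
			/* high channel sits just above green: bit 10 or 11 */
			unsigned int high_off = green_len + 5;

			printf("%s: low.offset=0 green.offset=5 high.offset=%u "
			       "green.length=%u\n",
			       intensity ? "IBGR:555" : "BGR:565",
			       high_off, green_len);
		}
		return 0;
	}

Which channel is "high" depends on the wiring: red for ATMEL_LCDC_WIRING_RGB, blue otherwise, exactly as in the hunk.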
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index 3f2519d30715..e06cd5d90c97 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/clk.h>
 #include <linux/fb.h>
+#include <linux/io.h>
 
 #include <linux/platform_data/video-ep93xx.h>
 
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 755556ca5b2d..45169cbaba6e 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -169,6 +169,7 @@ struct mxsfb_info {
 	unsigned dotclk_delay;
 	const struct mxsfb_devdata *devdata;
 	int mapped;
+	u32 sync;
 };
 
 #define mxsfb_is_v3(host) (host->devdata->ipversion == 3)
@@ -456,9 +457,9 @@ static int mxsfb_set_par(struct fb_info *fb_info)
 		vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
 	if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT)
 		vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
-	if (fb_info->var.sync & FB_SYNC_DATA_ENABLE_HIGH_ACT)
+	if (host->sync & MXSFB_SYNC_DATA_ENABLE_HIGH_ACT)
 		vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
-	if (fb_info->var.sync & FB_SYNC_DOTCLK_FAILING_ACT)
+	if (host->sync & MXSFB_SYNC_DOTCLK_FAILING_ACT)
 		vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING;
 
 	writel(vdctrl0, host->base + LCDC_VDCTRL0);
@@ -861,6 +862,8 @@ static int mxsfb_probe(struct platform_device *pdev)
 
 	INIT_LIST_HEAD(&fb_info->modelist);
 
+	host->sync = pdata->sync;
+
 	ret = mxsfb_init_fbinfo(host);
 	if (ret != 0)
 		goto error_init_fb;
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index e31f5b33b501..d40612c31a98 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -32,6 +32,8 @@
 
 #include <linux/omap-dma.h>
 
+#include <mach/hardware.h>
+
 #include "omapfb.h"
 #include "lcdc.h"
 
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index 6b6643911d29..048c98381ef6 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -63,6 +63,9 @@ struct tpo_td043_device {
 	u32 power_on_resume:1;
 };
 
+/* used to pass spi_device from SPI to DSS portion of the driver */
+static struct tpo_td043_device *g_tpo_td043;
+
 static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data)
 {
 	struct spi_message	m;
@@ -403,7 +406,7 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
 
 static int tpo_td043_probe(struct omap_dss_device *dssdev)
 {
-	struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+	struct tpo_td043_device *tpo_td043 = g_tpo_td043;
 	int nreset_gpio = dssdev->reset_gpio;
 	int ret = 0;
 
@@ -440,6 +443,8 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev)
 	if (ret)
 		dev_warn(&dssdev->dev, "failed to create sysfs files\n");
 
+	dev_set_drvdata(&dssdev->dev, tpo_td043);
+
 	return 0;
 
 fail_gpio_req:
@@ -505,6 +510,9 @@ static int tpo_td043_spi_probe(struct spi_device *spi)
 		return -ENODEV;
 	}
 
+	if (g_tpo_td043 != NULL)
+		return -EBUSY;
+
 	spi->bits_per_word = 16;
 	spi->mode = SPI_MODE_0;
 
@@ -521,7 +529,7 @@ static int tpo_td043_spi_probe(struct spi_device *spi)
 	tpo_td043->spi = spi;
 	tpo_td043->nreset_gpio = dssdev->reset_gpio;
 	dev_set_drvdata(&spi->dev, tpo_td043);
-	dev_set_drvdata(&dssdev->dev, tpo_td043);
+	g_tpo_td043 = tpo_td043;
 
 	omap_dss_register_driver(&tpo_td043_driver);
 
@@ -534,6 +542,7 @@ static int tpo_td043_spi_remove(struct spi_device *spi)
 
 	omap_dss_unregister_driver(&tpo_td043_driver);
 	kfree(tpo_td043);
+	g_tpo_td043 = NULL;
 
 	return 0;
 }
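
The panel driver has two probe paths, SPI and DSS, and the DSS one used to fetch its state with dev_get_drvdata() before anything had stored it. The fix parks the object in a file-scope pointer when the SPI device probes and lets the DSS probe pick it up and only then install it as drvdata; the -EBUSY check makes the single-instance assumption explicit. The handoff in isolation (demo_* names invented):

	#include <linux/device.h>
	#include <linux/errno.h>

	struct demo_state { int configured; };

	/* Set by the SPI probe, consumed later by the DSS probe. */
	static struct demo_state *g_demo_state;

	static int demo_spi_probe(struct demo_state *state)
	{
		if (g_demo_state)
			return -EBUSY;	/* only one panel supported */
		g_demo_state = state;
		return 0;
	}

	static int demo_dss_probe(struct device *dssdev)
	{
		/* drvdata is not populated yet; use the parked pointer. */
		struct demo_state *state = g_demo_state;

		dev_set_drvdata(dssdev, state);
		return 0;
	}

	static void demo_spi_remove(void)
	{
		g_demo_state = NULL;	/* allow a later re-probe */
	}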
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index d7d66ef5cb58..7f791aeda4d2 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -202,12 +202,10 @@ static const enum omap_dss_output_id omap3630_dss_supported_outputs[] = {
 
 static const enum omap_dss_output_id omap4_dss_supported_outputs[] = {
 	/* OMAP_DSS_CHANNEL_LCD */
-	OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
-	OMAP_DSS_OUTPUT_DSI1,
+	OMAP_DSS_OUTPUT_DBI | OMAP_DSS_OUTPUT_DSI1,
 
 	/* OMAP_DSS_CHANNEL_DIGIT */
-	OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI |
-	OMAP_DSS_OUTPUT_DPI,
+	OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI,
 
 	/* OMAP_DSS_CHANNEL_LCD2 */
 	OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index e3b8f757d2d3..0e9d8c479c35 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -40,13 +40,12 @@
 #include "sp5100_tco.h"
 
 /* Module and version information */
-#define TCO_VERSION "0.03"
+#define TCO_VERSION "0.05"
 #define TCO_MODULE_NAME "SP5100 TCO timer"
 #define TCO_DRIVER_NAME   TCO_MODULE_NAME ", v" TCO_VERSION
 
 /* internal variables */
 static u32 tcobase_phys;
-static u32 resbase_phys;
 static u32 tco_wdt_fired;
 static void __iomem *tcobase;
 static unsigned int pm_iobase;
@@ -54,10 +53,6 @@ static DEFINE_SPINLOCK(tco_lock);	/* Guards the hardware */
 static unsigned long timer_alive;
 static char tco_expect_close;
 static struct pci_dev *sp5100_tco_pci;
-static struct resource wdt_res = {
-	.name = "Watchdog Timer",
-	.flags = IORESOURCE_MEM,
-};
 
 /* the watchdog platform device */
 static struct platform_device *sp5100_tco_platform_device;
@@ -75,12 +70,6 @@ module_param(nowayout, bool, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started."
 		" (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-static unsigned int force_addr;
-module_param(force_addr, uint, 0);
-MODULE_PARM_DESC(force_addr, "Force the use of specified MMIO address."
-		" ONLY USE THIS PARAMETER IF YOU REALLY KNOW"
-		" WHAT YOU ARE DOING (default=none)");
-
 /*
  * Some TCO specific functions
  */
@@ -176,39 +165,6 @@ static void tco_timer_enable(void)
 	}
 }
 
-static void tco_timer_disable(void)
-{
-	int val;
-
-	if (sp5100_tco_pci->revision >= 0x40) {
-		/* For SB800 or later */
-		/* Enable watchdog decode bit and Disable watchdog timer */
-		outb(SB800_PM_WATCHDOG_CONTROL, SB800_IO_PM_INDEX_REG);
-		val = inb(SB800_IO_PM_DATA_REG);
-		val |= SB800_PCI_WATCHDOG_DECODE_EN;
-		val |= SB800_PM_WATCHDOG_DISABLE;
-		outb(val, SB800_IO_PM_DATA_REG);
-	} else {
-		/* For SP5100 or SB7x0 */
-		/* Enable watchdog decode bit */
-		pci_read_config_dword(sp5100_tco_pci,
-				      SP5100_PCI_WATCHDOG_MISC_REG,
-				      &val);
-
-		val |= SP5100_PCI_WATCHDOG_DECODE_EN;
-
-		pci_write_config_dword(sp5100_tco_pci,
-				       SP5100_PCI_WATCHDOG_MISC_REG,
-				       val);
-
-		/* Disable Watchdog timer */
-		outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
-		val = inb(SP5100_IO_PM_DATA_REG);
-		val |= SP5100_PM_WATCHDOG_DISABLE;
-		outb(val, SP5100_IO_PM_DATA_REG);
-	}
-}
-
 /*
  * /dev/watchdog handling
  */
@@ -361,7 +317,7 @@ static unsigned char sp5100_tco_setupdevice(void)
 {
 	struct pci_dev *dev = NULL;
 	const char *dev_name = NULL;
-	u32 val, tmp_val;
+	u32 val;
 	u32 index_reg, data_reg, base_addr;
 
 	/* Match the PCI device */
@@ -459,63 +415,8 @@ static unsigned char sp5100_tco_setupdevice(void)
 	} else
 		pr_debug("SBResource_MMIO is disabled(0x%04x)\n", val);
 
-	/*
-	 * Lastly re-programming the watchdog timer MMIO address,
-	 * This method is a last resort...
-	 *
-	 * Before re-programming, to ensure that the watchdog timer
-	 * is disabled, disable the watchdog timer.
-	 */
-	tco_timer_disable();
-
-	if (force_addr) {
-		/*
-		 * Force the use of watchdog timer MMIO address, and aligned to
-		 * 8byte boundary.
-		 */
-		force_addr &= ~0x7;
-		val = force_addr;
-
-		pr_info("Force the use of 0x%04x as MMIO address\n", val);
-	} else {
-		/*
-		 * Get empty slot into the resource tree for watchdog timer.
-		 */
-		if (allocate_resource(&iomem_resource,
-				      &wdt_res,
-				      SP5100_WDT_MEM_MAP_SIZE,
-				      0xf0000000,
-				      0xfffffff8,
-				      0x8,
-				      NULL,
-				      NULL)) {
-			pr_err("MMIO allocation failed\n");
-			goto unreg_region;
-		}
-
-		val = resbase_phys = wdt_res.start;
-		pr_debug("Got 0x%04x from resource tree\n", val);
-	}
-
-	/* Restore to the low three bits */
-	outb(base_addr+0, index_reg);
-	tmp_val = val | (inb(data_reg) & 0x7);
-
-	/* Re-programming the watchdog timer base address */
-	outb(base_addr+0, index_reg);
-	outb((tmp_val >>  0) & 0xff, data_reg);
-	outb(base_addr+1, index_reg);
-	outb((tmp_val >>  8) & 0xff, data_reg);
-	outb(base_addr+2, index_reg);
-	outb((tmp_val >> 16) & 0xff, data_reg);
-	outb(base_addr+3, index_reg);
-	outb((tmp_val >> 24) & 0xff, data_reg);
-
-	if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
-					  dev_name)) {
-		pr_err("MMIO address 0x%04x already in use\n", val);
-		goto unreg_resource;
-	}
+	pr_notice("failed to find MMIO address, giving up.\n");
+	goto unreg_region;
 
 setup_wdt:
 	tcobase_phys = val;
@@ -555,9 +456,6 @@ setup_wdt:
 
 unreg_mem_region:
 	release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
-unreg_resource:
-	if (resbase_phys)
-		release_resource(&wdt_res);
 unreg_region:
 	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
 exit:
@@ -567,7 +465,6 @@ exit:
 static int sp5100_tco_init(struct platform_device *dev)
 {
 	int ret;
-	char addr_str[16];
 
 	/*
	 * Check whether or not the hardware watchdog is there. If found, then
@@ -599,23 +496,14 @@ static int sp5100_tco_init(struct platform_device *dev)
 	clear_bit(0, &timer_alive);
 
 	/* Show module parameters */
-	if (force_addr == tcobase_phys)
-		/* The force_addr is vaild */
-		sprintf(addr_str, "0x%04x", force_addr);
-	else
-		strcpy(addr_str, "none");
-
-	pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d, "
-		"force_addr=%s)\n",
-		tcobase, heartbeat, nowayout, addr_str);
+	pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
+		tcobase, heartbeat, nowayout);
 
 	return 0;
 
 exit:
 	iounmap(tcobase);
 	release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
-	if (resbase_phys)
-		release_resource(&wdt_res);
 	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
 	return ret;
 }
@@ -630,8 +518,6 @@ static void sp5100_tco_cleanup(void)
 	misc_deregister(&sp5100_tco_miscdev);
 	iounmap(tcobase);
 	release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
-	if (resbase_phys)
-		release_resource(&wdt_res);
 	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
 }
 
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
index 71594a0c14b7..2b28c00da0df 100644
--- a/drivers/watchdog/sp5100_tco.h
+++ b/drivers/watchdog/sp5100_tco.h
@@ -57,7 +57,7 @@
 #define SB800_PM_WATCHDOG_DISABLE	(1 << 2)
 #define SB800_PM_WATCHDOG_SECOND_RES	(3 << 0)
 #define SB800_ACPI_MMIO_DECODE_EN	(1 << 0)
-#define SB800_ACPI_MMIO_SEL		(1 << 2)
+#define SB800_ACPI_MMIO_SEL		(1 << 1)
 
 
 #define SB800_PM_WDT_MMIO_OFFSET	0xB00
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 5a32232cf7c1..67af155cf602 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -182,7 +182,7 @@ config XEN_PRIVCMD
 
 config XEN_STUB
 	bool "Xen stub drivers"
-	depends on XEN && X86_64
+	depends on XEN && X86_64 && BROKEN
 	default n
 	help
 	  Allow kernel to install stub drivers, to reserve space for Xen drivers,
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index d17aa41a9041..aa85881d17b2 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -403,11 +403,23 @@ static void unmask_evtchn(int port)
 
 	if (unlikely((cpu != cpu_from_evtchn(port))))
 		do_hypercall = 1;
-	else
+	else {
+		/*
+		 * Need to clear the mask before checking pending to
+		 * avoid a race with an event becoming pending.
+		 *
+		 * EVTCHNOP_unmask will only trigger an upcall if the
+		 * mask bit was set, so if a hypercall is needed
+		 * remask the event.
+		 */
+		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
 		evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
 
-	if (unlikely(evtchn_pending && xen_hvm_domain()))
-		do_hypercall = 1;
+		if (unlikely(evtchn_pending && xen_hvm_domain())) {
+			sync_set_bit(port, BM(&s->evtchn_mask[0]));
+			do_hypercall = 1;
+		}
+	}
 
 	/* Slow path (hypercall) if this is a non-local port or if this is
 	 * an hvm domain and an event is pending (hvm domains don't have
@@ -418,8 +430,6 @@ static void unmask_evtchn(int port)
 	} else {
 		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
 
-		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
-
 		/*
 		 * The following is basically the equivalent of
 		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
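
The unmask_evtchn() change is an ordering fix: the mask bit must be cleared before the pending bit is sampled, or an event that fires between the test and the clear is lost; and when delivery has to go through the hypercall (an HVM domain with the event already pending), the channel is re-masked first so that EVTCHNOP_unmask still sees a masked, pending channel and raises the upcall. The same ordering on plain bitmaps, as a generic sketch rather than the Xen code:

	#include <linux/bitops.h>
	#include <linux/types.h>

	static void demo_unmask(unsigned long *mask, unsigned long *pending,
				int port, bool deliver_via_hypercall)
	{
		/* Unmask first: an event arriving after this is never lost. */
		clear_bit(port, mask);

		if (test_bit(port, pending) && deliver_via_hypercall) {
			/*
			 * Re-mask so the subsequent unmask hypercall sees a
			 * masked, pending channel and triggers the upcall.
			 */
			set_bit(port, mask);
		}
	}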
diff --git a/drivers/xen/fallback.c b/drivers/xen/fallback.c
index 0ef7c4d40f86..b04fb64c5a91 100644
--- a/drivers/xen/fallback.c
+++ b/drivers/xen/fallback.c
@@ -44,7 +44,7 @@ int xen_event_channel_op_compat(int cmd, void *arg)
 }
 EXPORT_SYMBOL_GPL(xen_event_channel_op_compat);
 
-int HYPERVISOR_physdev_op_compat(int cmd, void *arg)
+int xen_physdev_op_compat(int cmd, void *arg)
 {
 	struct physdev_op op;
 	int rc;
@@ -78,3 +78,4 @@ int HYPERVISOR_physdev_op_compat(int cmd, void *arg)
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(xen_physdev_op_compat);
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index f3278a6603ca..90e34ac7e522 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -505,6 +505,9 @@ static int __init xen_acpi_processor_init(void)
 
 		pr = per_cpu(processors, i);
 		perf = per_cpu_ptr(acpi_perf_data, i);
+		if (!pr)
+			continue;
+
 		pr->performance = perf;
 		rc = acpi_processor_get_performance_info(pr);
 		if (rc)
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 9204126f1560..a2278ba7fb27 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -17,6 +17,7 @@
17#include <xen/events.h> 17#include <xen/events.h>
18#include <asm/xen/pci.h> 18#include <asm/xen/pci.h>
19#include <asm/xen/hypervisor.h> 19#include <asm/xen/hypervisor.h>
20#include <xen/interface/physdev.h>
20#include "pciback.h" 21#include "pciback.h"
21#include "conf_space.h" 22#include "conf_space.h"
22#include "conf_space_quirks.h" 23#include "conf_space_quirks.h"
@@ -85,37 +86,52 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
 static void pcistub_device_release(struct kref *kref)
 {
 	struct pcistub_device *psdev;
+	struct pci_dev *dev;
 	struct xen_pcibk_dev_data *dev_data;
 
 	psdev = container_of(kref, struct pcistub_device, kref);
-	dev_data = pci_get_drvdata(psdev->dev);
+	dev = psdev->dev;
+	dev_data = pci_get_drvdata(dev);
 
-	dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
+	dev_dbg(&dev->dev, "pcistub_device_release\n");
 
-	xen_unregister_device_domain_owner(psdev->dev);
+	xen_unregister_device_domain_owner(dev);
 
 	/* Call the reset function which does not take lock as this
 	 * is called from "unbind" which takes a device_lock mutex.
 	 */
-	__pci_reset_function_locked(psdev->dev);
-	if (pci_load_and_free_saved_state(psdev->dev,
-					  &dev_data->pci_saved_state)) {
-		dev_dbg(&psdev->dev->dev, "Could not reload PCI state\n");
-	} else
-		pci_restore_state(psdev->dev);
+	__pci_reset_function_locked(dev);
+	if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
+		dev_dbg(&dev->dev, "Could not reload PCI state\n");
+	else
+		pci_restore_state(dev);
+
+	if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+		struct physdev_pci_device ppdev = {
+			.seg = pci_domain_nr(dev->bus),
+			.bus = dev->bus->number,
+			.devfn = dev->devfn
+		};
+		int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
+						&ppdev);
+
+		if (err)
+			dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
+				 err);
+	}
 
 	/* Disable the device */
-	xen_pcibk_reset_device(psdev->dev);
+	xen_pcibk_reset_device(dev);
 
 	kfree(dev_data);
-	pci_set_drvdata(psdev->dev, NULL);
+	pci_set_drvdata(dev, NULL);
 
 	/* Clean-up the device */
-	xen_pcibk_config_free_dyn_fields(psdev->dev);
-	xen_pcibk_config_free_dev(psdev->dev);
+	xen_pcibk_config_free_dyn_fields(dev);
+	xen_pcibk_config_free_dev(dev);
 
-	psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
-	pci_dev_put(psdev->dev);
+	dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+	pci_dev_put(dev);
 
 	kfree(psdev);
 }
@@ -355,6 +371,19 @@ static int pcistub_init_device(struct pci_dev *dev)
 	if (err)
 		goto config_release;
 
+	if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+		struct physdev_pci_device ppdev = {
+			.seg = pci_domain_nr(dev->bus),
+			.bus = dev->bus->number,
+			.devfn = dev->devfn
+		};
+
+		err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
+		if (err)
+			dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
+				err);
+	}
+
 	/* We need the device active to save the state. */
 	dev_dbg(&dev->dev, "save state of device\n");
 	pci_save_state(dev);
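
PHYSDEVOP_prepare_msix and PHYSDEVOP_release_msix are invoked with an identical seg/bus/devfn triple in the two pci_stub.c hunks above. A hypothetical helper, not part of the patch, that would keep the two call sites in sync:

static int pcistub_msix_physdev_op(struct pci_dev *dev, int op)
{
	struct physdev_pci_device ppdev = {
		.seg = pci_domain_nr(dev->bus),
		.bus = dev->bus->number,
		.devfn = dev->devfn
	};

	/* Only devices with an MSI-X capability need the hypercall. */
	if (!pci_find_capability(dev, PCI_CAP_ID_MSIX))
		return 0;
	return HYPERVISOR_physdev_op(op, &ppdev);
}

/* e.g. pcistub_msix_physdev_op(dev, PHYSDEVOP_prepare_msix); */
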
diff --git a/firmware/Makefile b/firmware/Makefile
index cbb09ce9730a..5d8ee1319b5c 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -82,7 +82,7 @@ fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
 fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \
 					qlogic/12160.bin
 fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin
-fw-shipped-$(CONFIG_INFINIBAND_QIB) += qlogic/sd7220.fw
+fw-shipped-$(CONFIG_INFINIBAND_QIB) += intel/sd7220.fw
 fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp
 fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \
 					ess/maestro3_assp_minisrc.fw
diff --git a/firmware/qlogic/sd7220.fw.ihex b/firmware/intel/sd7220.fw.ihex
index a33636319112..a33636319112 100644
--- a/firmware/qlogic/sd7220.fw.ihex
+++ b/firmware/intel/sd7220.fw.ihex
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index ecd25a1b4e51..ca9d8f1a3bb6 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -651,6 +651,8 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
 	if (tree_mod_dont_log(fs_info, NULL))
 		return 0;
 
+	__tree_mod_log_free_eb(fs_info, old_root);
+
 	ret = tree_mod_alloc(fs_info, flags, &tm);
 	if (ret < 0)
 		goto out;
@@ -736,7 +738,7 @@ tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
 static noinline void
 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
 		     struct extent_buffer *src, unsigned long dst_offset,
-		     unsigned long src_offset, int nr_items)
+		     unsigned long src_offset, int nr_items, int log_removal)
 {
 	int ret;
 	int i;
@@ -750,10 +752,12 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
 	}
 
 	for (i = 0; i < nr_items; i++) {
-		ret = tree_mod_log_insert_key_locked(fs_info, src,
-						     i + src_offset,
-						     MOD_LOG_KEY_REMOVE);
-		BUG_ON(ret < 0);
+		if (log_removal) {
+			ret = tree_mod_log_insert_key_locked(fs_info, src,
+							     i + src_offset,
+							     MOD_LOG_KEY_REMOVE);
+			BUG_ON(ret < 0);
+		}
 		ret = tree_mod_log_insert_key_locked(fs_info, dst,
 						     i + dst_offset,
 						     MOD_LOG_KEY_ADD);
@@ -927,7 +931,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
 			BUG_ON(ret); /* -ENOMEM */
 		}
-		tree_mod_log_free_eb(root->fs_info, buf);
 		clean_tree_block(trans, root, buf);
 		*last_ref = 1;
 	}
@@ -1046,6 +1049,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 		btrfs_set_node_ptr_generation(parent, parent_slot,
 					      trans->transid);
 		btrfs_mark_buffer_dirty(parent);
+		tree_mod_log_free_eb(root->fs_info, buf);
 		btrfs_free_tree_block(trans, root, buf, parent_start,
 				      last_ref);
 	}
@@ -1750,7 +1754,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		goto enospc;
 	}
 
-	tree_mod_log_free_eb(root->fs_info, root->node);
 	tree_mod_log_set_root_pointer(root, child);
 	rcu_assign_pointer(root->node, child);
 
@@ -2995,7 +2998,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
 		push_items = min(src_nritems - 8, push_items);
 
 	tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
-			     push_items);
+			     push_items, 1);
 	copy_extent_buffer(dst, src,
 			   btrfs_node_key_ptr_offset(dst_nritems),
 			   btrfs_node_key_ptr_offset(0),
@@ -3066,7 +3069,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
 		      sizeof(struct btrfs_key_ptr));
 
 	tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
-			     src_nritems - push_items, push_items);
+			     src_nritems - push_items, push_items, 1);
 	copy_extent_buffer(dst, src,
 			   btrfs_node_key_ptr_offset(0),
 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
@@ -3218,12 +3221,18 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 	int mid;
 	int ret;
 	u32 c_nritems;
+	int tree_mod_log_removal = 1;
 
 	c = path->nodes[level];
 	WARN_ON(btrfs_header_generation(c) != trans->transid);
 	if (c == root->node) {
 		/* trying to split the root, lets make a new one */
 		ret = insert_new_root(trans, root, path, level + 1);
+		/*
+		 * removal of root nodes has been logged by
+		 * tree_mod_log_set_root_pointer due to locking
+		 */
+		tree_mod_log_removal = 0;
 		if (ret)
 			return ret;
 	} else {
@@ -3261,7 +3270,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
 			    BTRFS_UUID_SIZE);
 
-	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
+	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid,
+			     tree_mod_log_removal);
 	copy_extent_buffer(split, c,
 			   btrfs_node_key_ptr_offset(0),
 			   btrfs_node_key_ptr_offset(mid),
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 7d84651e850b..6d19a0a554aa 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1291,6 +1291,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 				      0, objectid, NULL, 0, 0, 0);
 	if (IS_ERR(leaf)) {
 		ret = PTR_ERR(leaf);
+		leaf = NULL;
 		goto fail;
 	}
 
@@ -1334,11 +1335,16 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 
 	btrfs_tree_unlock(leaf);
 
+	return root;
+
 fail:
-	if (ret)
-		return ERR_PTR(ret);
+	if (leaf) {
+		btrfs_tree_unlock(leaf);
+		free_extent_buffer(leaf);
+	}
+	kfree(root);
 
-	return root;
+	return ERR_PTR(ret);
 }
 
 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
@@ -3253,7 +3259,7 @@ void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
 	if (btrfs_root_refs(&root->root_item) == 0)
 		synchronize_srcu(&fs_info->subvol_srcu);
 
-	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
 		btrfs_free_log(NULL, root);
 		btrfs_free_log_root_tree(NULL, fs_info);
 	}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9ac2eca681eb..3d551231caba 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -257,7 +257,8 @@ static int exclude_super_stripes(struct btrfs_root *root,
 		cache->bytes_super += stripe_len;
 		ret = add_excluded_extent(root, cache->key.objectid,
 					  stripe_len);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret)
+			return ret;
 	}
 
 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
@@ -265,13 +266,17 @@ static int exclude_super_stripes(struct btrfs_root *root,
 		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
 				       cache->key.objectid, bytenr,
 				       0, &logical, &nr, &stripe_len);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret)
+			return ret;
 
 		while (nr--) {
 			cache->bytes_super += stripe_len;
 			ret = add_excluded_extent(root, logical[nr],
 						  stripe_len);
-			BUG_ON(ret); /* -ENOMEM */
+			if (ret) {
+				kfree(logical);
+				return ret;
+			}
 		}
 
 		kfree(logical);
@@ -4438,7 +4443,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
 	spin_lock(&sinfo->lock);
 	spin_lock(&block_rsv->lock);
 
-	block_rsv->size = num_bytes;
+	block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
 
 	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
 		    sinfo->bytes_reserved + sinfo->bytes_readonly +
@@ -4793,14 +4798,49 @@ out_fail:
 	 * If the inodes csum_bytes is the same as the original
 	 * csum_bytes then we know we haven't raced with any free()ers
 	 * so we can just reduce our inodes csum bytes and carry on.
-	 * Otherwise we have to do the normal free thing to account for
-	 * the case that the free side didn't free up its reserve
-	 * because of this outstanding reservation.
 	 */
-	if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+	if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
 		calc_csum_metadata_size(inode, num_bytes, 0);
-	else
-		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+	} else {
+		u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
+		u64 bytes;
+
+		/*
+		 * This is tricky, but first we need to figure out how much we
+		 * free'd from any free-ers that occurred during this
+		 * reservation, so we reset ->csum_bytes to the csum_bytes
+		 * before we dropped our lock, and then call the free for the
+		 * number of bytes that were freed while we were trying our
+		 * reservation.
+		 */
+		bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
+		BTRFS_I(inode)->csum_bytes = csum_bytes;
+		to_free = calc_csum_metadata_size(inode, bytes, 0);
+
+		/*
+		 * Now we need to see how much we would have freed had we not
+		 * been making this reservation and our ->csum_bytes were not
+		 * artificially inflated.
+		 */
+		BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
+		bytes = csum_bytes - orig_csum_bytes;
+		bytes = calc_csum_metadata_size(inode, bytes, 0);
+
+		/*
+		 * Now reset ->csum_bytes to what it should be. If bytes is
+		 * more than to_free then we would have free'd more space had we
+		 * not had an artificially high ->csum_bytes, so we need to free
+		 * the remainder. If bytes is the same or less then we don't
+		 * need to do anything, the other free-ers did the correct
+		 * thing.
+		 */
+		BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
+		if (bytes > to_free)
+			to_free = bytes - to_free;
+		else
+			to_free = 0;
+	}
 	spin_unlock(&BTRFS_I(inode)->lock);
 	if (dropped)
 		to_free += btrfs_calc_trans_metadata_size(root, dropped);
@@ -7947,7 +7987,17 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		 * info has super bytes accounted for, otherwise we'll think
 		 * we have more space than we actually do.
 		 */
-		exclude_super_stripes(root, cache);
+		ret = exclude_super_stripes(root, cache);
+		if (ret) {
+			/*
+			 * We may have excluded something, so call this just in
+			 * case.
+			 */
+			free_excluded_extents(root, cache);
+			kfree(cache->free_space_ctl);
+			kfree(cache);
+			goto error;
+		}
 
 		/*
 		 * check for two cases, either we are full, and therefore
@@ -8089,7 +8139,17 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 
 	cache->last_byte_to_unpin = (u64)-1;
 	cache->cached = BTRFS_CACHE_FINISHED;
-	exclude_super_stripes(root, cache);
+	ret = exclude_super_stripes(root, cache);
+	if (ret) {
+		/*
+		 * We may have excluded something, so call this just in
+		 * case.
+		 */
+		free_excluded_extents(root, cache);
+		kfree(cache->free_space_ctl);
+		kfree(cache);
+		return ret;
+	}
 
 	add_new_free_space(cache, root->fs_info, chunk_offset,
 			   chunk_offset + size);
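
The two extent-tree.c hunks above repeat the same unwind when exclude_super_stripes() fails. A sketch of a shared cleanup helper, hypothetical and not in the patch, assuming the block-group type these call sites use:

static void btrfs_put_partial_block_group(struct btrfs_root *root,
					  struct btrfs_block_group_cache *cache)
{
	/* We may have excluded something, so call this just in case. */
	free_excluded_extents(root, cache);
	kfree(cache->free_space_ctl);
	kfree(cache);
}
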
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index bed072aa461f..73f2bfe3ac93 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1257,6 +1257,39 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 				  GFP_NOFS);
 }
 
+int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
+{
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	struct page *page;
+
+	while (index <= end_index) {
+		page = find_get_page(inode->i_mapping, index);
+		BUG_ON(!page); /* Pages should be in the extent_io_tree */
+		clear_page_dirty_for_io(page);
+		page_cache_release(page);
+		index++;
+	}
+	return 0;
+}
+
+int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
+{
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	struct page *page;
+
+	while (index <= end_index) {
+		page = find_get_page(inode->i_mapping, index);
+		BUG_ON(!page); /* Pages should be in the extent_io_tree */
+		account_page_redirty(page);
+		__set_page_dirty_nobuffers(page);
+		page_cache_release(page);
+		index++;
+	}
+	return 0;
+}
+
 /*
  * helper function to set both pages and extents in the tree writeback
  */
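
The two helpers added above are wired up by the fs/btrfs/inode.c hunks later in this diff. A minimal sketch of the calling pattern (the function name and the stubbed-out compression step are invented for illustration):

static int compress_range_sketch(struct inode *inode, u64 start, u64 end)
{
	int ret;

	/* keep mmap writers from touching the pages mid-compression */
	extent_range_clear_dirty_for_io(inode, start, end);

	ret = -E2BIG; /* stand-in for btrfs_compress_pages() deciding to bail */
	if (ret)
		/* compression abandoned: hand the pages back to writeback */
		extent_range_redirty_for_io(inode, start, end);
	return ret;
}
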
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 6068a1985560..258c92156857 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -325,6 +325,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
 			      unsigned long *map_len);
 int extent_range_uptodate(struct extent_io_tree *tree,
 			  u64 start, u64 end);
+int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
+int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
 int extent_clear_unlock_delalloc(struct inode *inode,
 				 struct extent_io_tree *tree,
 				 u64 start, u64 end, struct page *locked_page,
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index ec160202be3e..c4628a201cb3 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -118,9 +118,11 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
 		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
 		csums_in_item /= csum_size;
 
-		if (csum_offset >= csums_in_item) {
+		if (csum_offset == csums_in_item) {
 			ret = -EFBIG;
 			goto fail;
+		} else if (csum_offset > csums_in_item) {
+			goto fail;
 		}
 	}
 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
@@ -728,7 +730,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 
 	sector_sum = sums->sums;
-	trans->adding_csums = 1;
 again:
 	next_offset = (u64)-1;
 	found_next = 0;
@@ -899,7 +900,6 @@ next_sector:
 		goto again;
 	}
 out:
-	trans->adding_csums = 0;
 	btrfs_free_path(path);
 	return ret;
 
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 5b4ea5f55b8f..ade03e6f7bd2 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2142,6 +2142,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 {
 	struct inode *inode = file_inode(file);
 	struct extent_state *cached_state = NULL;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
 	u64 cur_offset;
 	u64 last_byte;
 	u64 alloc_start;
@@ -2169,6 +2170,11 @@ static long btrfs_fallocate(struct file *file, int mode,
 	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
 	if (ret)
 		return ret;
+	if (root->fs_info->quota_enabled) {
+		ret = btrfs_qgroup_reserve(root, alloc_end - alloc_start);
+		if (ret)
+			goto out_reserve_fail;
+	}
 
 	/*
 	 * wait for ordered IO before we have any locks. We'll loop again
@@ -2272,6 +2278,9 @@ static long btrfs_fallocate(struct file *file, int mode,
 			     &cached_state, GFP_NOFS);
 out:
 	mutex_unlock(&inode->i_mutex);
+	if (root->fs_info->quota_enabled)
+		btrfs_qgroup_free(root, alloc_end - alloc_start);
+out_reserve_fail:
 	/* Let go of our reservation. */
 	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
 	return ret;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ca1b767d51f7..09c58a35b429 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -353,6 +353,7 @@ static noinline int compress_file_range(struct inode *inode,
 	int i;
 	int will_compress;
 	int compress_type = root->fs_info->compress_type;
+	int redirty = 0;
 
 	/* if this is a small write inside eof, kick off a defrag */
 	if ((end - start + 1) < 16 * 1024 &&
@@ -415,6 +416,17 @@ again:
 	if (BTRFS_I(inode)->force_compress)
 		compress_type = BTRFS_I(inode)->force_compress;
 
+	/*
+	 * we need to call clear_page_dirty_for_io on each
+	 * page in the range. Otherwise applications with the file
+	 * mmap'd can wander in and change the page contents while
+	 * we are compressing them.
+	 *
+	 * If the compression fails for any reason, we set the pages
+	 * dirty again later on.
+	 */
+	extent_range_clear_dirty_for_io(inode, start, end);
+	redirty = 1;
 	ret = btrfs_compress_pages(compress_type,
 				   inode->i_mapping, start,
 				   total_compressed, pages,
@@ -554,6 +566,8 @@ cleanup_and_bail_uncompressed:
 			__set_page_dirty_nobuffers(locked_page);
 			/* unlocked later on in the async handlers */
 		}
+		if (redirty)
+			extent_range_redirty_for_io(inode, start, end);
 		add_async_extent(async_cow, start, end - start + 1,
 				 0, NULL, 0, BTRFS_COMPRESS_NONE);
 		*num_added += 1;
@@ -1743,8 +1757,10 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
 	struct btrfs_ordered_sum *sum;
 
 	list_for_each_entry(sum, list, list) {
+		trans->adding_csums = 1;
 		btrfs_csum_file_blocks(trans,
 		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
+		trans->adding_csums = 0;
 	}
 	return 0;
 }
@@ -3679,11 +3695,9 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
 	 * 1 for the dir item
 	 * 1 for the dir index
 	 * 1 for the inode ref
-	 * 1 for the inode ref in the tree log
-	 * 2 for the dir entries in the log
 	 * 1 for the inode
 	 */
-	trans = btrfs_start_transaction(root, 8);
+	trans = btrfs_start_transaction(root, 5);
 	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
 		return trans;
 
@@ -8127,7 +8141,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
 	 * should cover the worst case number of items we'll modify.
 	 */
-	trans = btrfs_start_transaction(root, 20);
+	trans = btrfs_start_transaction(root, 11);
 	if (IS_ERR(trans)) {
 		ret = PTR_ERR(trans);
 		goto out_notrans;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index dc08d77b717e..005c45db699e 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -557,6 +557,7 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
 	INIT_LIST_HEAD(&splice);
 	INIT_LIST_HEAD(&works);
 
+	mutex_lock(&root->fs_info->ordered_operations_mutex);
 	spin_lock(&root->fs_info->ordered_extent_lock);
 	list_splice_init(&root->fs_info->ordered_extents, &splice);
 	while (!list_empty(&splice)) {
@@ -600,6 +601,7 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
 
 		cond_resched();
 	}
+	mutex_unlock(&root->fs_info->ordered_operations_mutex);
 }
 
 /*
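
The mutex added above makes concurrent whole-list drainers take turns, while the existing spinlock still covers only the brief list splice. A runnable user-space model of that two-level scheme (all names invented for the demo):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t drain_mutex = PTHREAD_MUTEX_INITIALIZER; /* ordered_operations_mutex */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;   /* ordered_extent_lock */
static int queued = 3;

static void drain(void)
{
	int grabbed;

	pthread_mutex_lock(&drain_mutex);	/* outer: one drainer at a time */
	pthread_mutex_lock(&list_lock);		/* inner, short hold: steal the list */
	grabbed = queued;
	queued = 0;
	pthread_mutex_unlock(&list_lock);
	printf("waiting on %d entries\n", grabbed);
	pthread_mutex_unlock(&drain_mutex);
}

int main(void) { drain(); return 0; }
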
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5471e47d6559..b44124dd2370 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1153,7 +1153,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 	ret = btrfs_find_all_roots(trans, fs_info, node->bytenr,
 				   sgn > 0 ? node->seq - 1 : node->seq, &roots);
 	if (ret < 0)
-		goto out;
+		return ret;
 
 	spin_lock(&fs_info->qgroup_lock);
 	quota_root = fs_info->quota_root;
@@ -1275,7 +1275,6 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 	ret = 0;
 unlock:
 	spin_unlock(&fs_info->qgroup_lock);
-out:
 	ulist_free(roots);
 	ulist_free(tmp);
 
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 53c3501fa4ca..85e072b956d5 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -542,7 +542,6 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
 	eb = path->nodes[0];
 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
-	btrfs_release_path(path);
 
 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 		do {
@@ -558,7 +557,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
 				ret < 0 ? -1 : ref_level,
 				ret < 0 ? -1 : ref_root);
 		} while (ret != 1);
+		btrfs_release_path(path);
 	} else {
+		btrfs_release_path(path);
 		swarn.path = path;
 		swarn.dev = dev;
 		iterate_extent_inodes(fs_info, found_key.objectid,
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index f7a8b861058b..c85e7c6b4598 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -3945,12 +3945,10 @@ static int is_extent_unchanged(struct send_ctx *sctx,
 		    found_key.type != key.type) {
 			key.offset += right_len;
 			break;
-		} else {
-			if (found_key.offset != key.offset + right_len) {
-				/* Should really not happen */
-				ret = -EIO;
-				goto out;
-			}
 		}
+		if (found_key.offset != key.offset + right_len) {
+			ret = 0;
+			goto out;
+		}
 		key = found_key;
 	}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d90e0485e01b..678977226570 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4935,7 +4935,18 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 	em = lookup_extent_mapping(em_tree, chunk_start, 1);
 	read_unlock(&em_tree->lock);
 
-	BUG_ON(!em || em->start != chunk_start);
+	if (!em) {
+		printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n",
+		       chunk_start);
+		return -EIO;
+	}
+
+	if (em->start != chunk_start) {
+		printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n",
+		       em->start, chunk_start);
+		free_extent_map(em);
+		return -EIO;
+	}
 	map = (struct map_lookup *)em->bdev;
 
 	length = em->len;
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index cfd1ce34e0bc..1d36db114772 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -614,53 +614,10 @@ decode_negTokenInit(unsigned char *security_blob, int length,
 		}
 	}
 
-	/* mechlistMIC */
-	if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
-		/* Check if we have reached the end of the blob, but with
-		   no mechListMic (e.g. NTLMSSP instead of KRB5) */
-		if (ctx.error == ASN1_ERR_DEC_EMPTY)
-			goto decode_negtoken_exit;
-		cFYI(1, "Error decoding last part negTokenInit exit3");
-		return 0;
-	} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
-		/* tag = 3 indicating mechListMIC */
-		cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
-		     cls, con, tag, end, *end);
-		return 0;
-	}
-
-	/* sequence */
-	if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
-		cFYI(1, "Error decoding last part negTokenInit exit5");
-		return 0;
-	} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
-		   || (tag != ASN1_SEQ)) {
-		cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)",
-		     cls, con, tag, end, *end);
-	}
-
-	/* sequence of */
-	if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
-		cFYI(1, "Error decoding last part negTokenInit exit 7");
-		return 0;
-	} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
-		cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)",
-		     cls, con, tag, end, *end);
-		return 0;
-	}
-
-	/* general string */
-	if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
-		cFYI(1, "Error decoding last part negTokenInit exit9");
-		return 0;
-	} else if ((cls != ASN1_UNI) || (con != ASN1_PRI)
-		   || (tag != ASN1_GENSTR)) {
-		cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)",
-		     cls, con, tag, end, *end);
-		return 0;
-	}
-	cFYI(1, "Need to call asn1_octets_decode() function for %s",
-	     ctx.pointer); /* is this UTF-8 or ASCII? */
-decode_negtoken_exit:
+	/*
+	 * We currently ignore anything at the end of the SPNEGO blob after
+	 * the mechTypes have been parsed, since none of that info is
+	 * used at the moment.
+	 */
 	return 1;
 }
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 3cf8a15af916..345fc89c4286 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -91,6 +91,30 @@ struct workqueue_struct *cifsiod_wq;
 __u8 cifs_client_guid[SMB2_CLIENT_GUID_SIZE];
 #endif
 
+/*
+ * Bumps refcount for cifs super block.
+ * Note that it should be only called if a reference to VFS super block is
+ * already held, e.g. in open-type syscalls context. Otherwise it can race with
+ * atomic_dec_and_test in deactivate_locked_super.
+ */
+void
+cifs_sb_active(struct super_block *sb)
+{
+	struct cifs_sb_info *server = CIFS_SB(sb);
+
+	if (atomic_inc_return(&server->active) == 1)
+		atomic_inc(&sb->s_active);
+}
+
+void
+cifs_sb_deactive(struct super_block *sb)
+{
+	struct cifs_sb_info *server = CIFS_SB(sb);
+
+	if (atomic_dec_and_test(&server->active))
+		deactivate_super(sb);
+}
+
 static int
 cifs_read_super(struct super_block *sb)
 {
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 7163419cecd9..0e32c3446ce9 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -41,6 +41,10 @@ extern struct file_system_type cifs_fs_type;
 extern const struct address_space_operations cifs_addr_ops;
 extern const struct address_space_operations cifs_addr_ops_smallbuf;
 
+/* Functions related to super block operations */
+extern void cifs_sb_active(struct super_block *sb);
+extern void cifs_sb_deactive(struct super_block *sb);
+
 /* Functions related to inodes */
 extern const struct inode_operations cifs_dir_inode_ops;
 extern struct inode *cifs_root_iget(struct super_block *);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 8c0d85577314..7a0dd99e4507 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -300,6 +300,8 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
 	mutex_init(&cfile->fh_mutex);
 
+	cifs_sb_active(inode->i_sb);
+
 	/*
 	 * If the server returned a read oplock and we have mandatory brlocks,
 	 * set oplock level to None.
@@ -349,7 +351,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
 	struct TCP_Server_Info *server = tcon->ses->server;
 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+	struct super_block *sb = inode->i_sb;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 	struct cifsLockInfo *li, *tmp;
 	struct cifs_fid fid;
 	struct cifs_pending_open open;
@@ -414,6 +417,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 
 	cifs_put_tlink(cifs_file->tlink);
 	dput(cifs_file->dentry);
+	cifs_sb_deactive(sb);
 	kfree(cifs_file);
 }
 
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 0079696305c9..20887bf63121 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1043,7 +1043,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry,
 					    cifs_sb->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
 	if (rc != 0) {
-		rc = -ETXTBSY;
+		rc = -EBUSY;
 		goto undo_setattr;
 	}
 
@@ -1062,7 +1062,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry,
 		if (rc == -ENOENT)
 			rc = 0;
 		else if (rc != 0) {
-			rc = -ETXTBSY;
+			rc = -EBUSY;
 			goto undo_rename;
 		}
 		cifsInode->delete_pending = true;
@@ -1169,15 +1169,13 @@ psx_del_no_retry:
 			cifs_drop_nlink(inode);
 	} else if (rc == -ENOENT) {
 		d_drop(dentry);
-	} else if (rc == -ETXTBSY) {
+	} else if (rc == -EBUSY) {
 		if (server->ops->rename_pending_delete) {
 			rc = server->ops->rename_pending_delete(full_path,
 								dentry, xid);
 			if (rc == 0)
 				cifs_drop_nlink(inode);
 		}
-		if (rc == -ETXTBSY)
-			rc = -EBUSY;
 	} else if ((rc == -EACCES) && (dosattr == 0) && inode) {
 		attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
 		if (attrs == NULL) {
@@ -1518,7 +1516,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
 	 * source. Note that cross directory moves do not work with
 	 * rename by filehandle to various Windows servers.
 	 */
-	if (rc == 0 || rc != -ETXTBSY)
+	if (rc == 0 || rc != -EBUSY)
 		goto do_rename_exit;
 
 	/* open-file renames don't work across directories */
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index a82bc51fdc82..c0b25b28be6c 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -62,7 +62,7 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = {
 	{ERRdiffdevice, -EXDEV},
 	{ERRnofiles, -ENOENT},
 	{ERRwriteprot, -EROFS},
-	{ERRbadshare, -ETXTBSY},
+	{ERRbadshare, -EBUSY},
 	{ERRlock, -EACCES},
 	{ERRunsup, -EINVAL},
 	{ERRnosuchshare, -ENXIO},
diff --git a/fs/dcache.c b/fs/dcache.c
index fbfae008ba44..e8bc3420d63e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2542,7 +2542,6 @@ static int prepend_path(const struct path *path,
 	bool slash = false;
 	int error = 0;
 
-	br_read_lock(&vfsmount_lock);
 	while (dentry != root->dentry || vfsmnt != root->mnt) {
 		struct dentry * parent;
 
@@ -2572,8 +2571,6 @@ static int prepend_path(const struct path *path,
 	if (!error && !slash)
 		error = prepend(buffer, buflen, "/", 1);
 
-out:
-	br_read_unlock(&vfsmount_lock);
 	return error;
 
 global_root:
@@ -2590,7 +2587,7 @@ global_root:
 	error = prepend(buffer, buflen, "/", 1);
 	if (!error)
 		error = is_mounted(vfsmnt) ? 1 : 2;
-	goto out;
+	return error;
 }
 
 /**
@@ -2617,9 +2614,11 @@ char *__d_path(const struct path *path,
 	int error;
 
 	prepend(&res, &buflen, "\0", 1);
+	br_read_lock(&vfsmount_lock);
 	write_seqlock(&rename_lock);
 	error = prepend_path(path, root, &res, &buflen);
 	write_sequnlock(&rename_lock);
+	br_read_unlock(&vfsmount_lock);
 
 	if (error < 0)
 		return ERR_PTR(error);
@@ -2636,9 +2635,11 @@ char *d_absolute_path(const struct path *path,
 	int error;
 
 	prepend(&res, &buflen, "\0", 1);
+	br_read_lock(&vfsmount_lock);
 	write_seqlock(&rename_lock);
 	error = prepend_path(path, &root, &res, &buflen);
 	write_sequnlock(&rename_lock);
+	br_read_unlock(&vfsmount_lock);
 
 	if (error > 1)
 		error = -EINVAL;
@@ -2702,11 +2703,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
 		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
 
 	get_fs_root(current->fs, &root);
+	br_read_lock(&vfsmount_lock);
 	write_seqlock(&rename_lock);
 	error = path_with_deleted(path, &root, &res, &buflen);
+	write_sequnlock(&rename_lock);
+	br_read_unlock(&vfsmount_lock);
 	if (error < 0)
 		res = ERR_PTR(error);
-	write_sequnlock(&rename_lock);
 	path_put(&root);
 	return res;
 }
@@ -2830,6 +2833,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
 	get_fs_root_and_pwd(current->fs, &root, &pwd);
 
 	error = -ENOENT;
+	br_read_lock(&vfsmount_lock);
 	write_seqlock(&rename_lock);
 	if (!d_unlinked(pwd.dentry)) {
 		unsigned long len;
@@ -2839,6 +2843,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
 		prepend(&cwd, &buflen, "\0", 1);
 		error = prepend_path(&pwd, &root, &cwd, &buflen);
 		write_sequnlock(&rename_lock);
+		br_read_unlock(&vfsmount_lock);
 
 		if (error < 0)
 			goto out;
@@ -2859,6 +2864,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
 		}
 	} else {
 		write_sequnlock(&rename_lock);
+		br_read_unlock(&vfsmount_lock);
 	}
 
 out:
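
All of the dcache.c hunks above move vfsmount_lock out of prepend_path() and into its callers, always taken outside the rename_lock seqlock. An illustrative sketch of the acquisition order every caller now follows (the wrapper itself is invented):

static int prepend_path_locked(const struct path *path,
			       const struct path *root,
			       char **buf, int *buflen)
{
	int error;

	br_read_lock(&vfsmount_lock);	/* outer: mount tree held stable */
	write_seqlock(&rename_lock);	/* inner: concurrent renames excluded */
	error = prepend_path(path, root, buf, buflen);
	write_sequnlock(&rename_lock);
	br_read_unlock(&vfsmount_lock);
	return error;
}
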
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 4a01ba315262..3b83cd604796 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -335,9 +335,9 @@ struct ext4_group_desc
  */
 
 struct flex_groups {
-	atomic_t free_inodes;
-	atomic_t free_clusters;
+	atomic64_t free_clusters;
+	atomic_t free_inodes;
 	atomic_t used_dirs;
 };
 
 #define EXT4_BG_INODE_UNINIT	0x0001 /* Inode table/bitmap not in use */
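
free_clusters becomes an atomic64_t, presumably because the per-flexbg free-cluster count can overflow a 32-bit atomic_t on very large filesystems, and it moves to the front of the struct, presumably so the 64-bit member stays naturally aligned. A user-space illustration of the 64-bit counter (the kernel equivalent is atomic64_add()/atomic64_read() on the flex_groups field):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	atomic_uint_least64_t free_clusters;

	atomic_init(&free_clusters, 0);
	atomic_fetch_add(&free_clusters, UINT64_C(5) << 30); /* > 2^32 */
	printf("%llu\n", (unsigned long long)atomic_load(&free_clusters));
	return 0;
}
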
@@ -2617,7 +2617,7 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
 extern int __init ext4_init_pageio(void);
 extern void ext4_add_complete_io(ext4_io_end_t *io_end);
 extern void ext4_exit_pageio(void);
-extern void ext4_ioend_wait(struct inode *);
+extern void ext4_ioend_shutdown(struct inode *);
 extern void ext4_free_io_end(ext4_io_end_t *io);
 extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
 extern void ext4_end_io_work(struct work_struct *work);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 28dd8eeea6a9..56efcaadf848 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1584,10 +1584,12 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
 	unsigned short ext1_ee_len, ext2_ee_len, max_len;
 
 	/*
-	 * Make sure that either both extents are uninitialized, or
-	 * both are _not_.
+	 * Make sure that both extents are initialized. We don't merge
+	 * uninitialized extents so that we can be sure that end_io code has
+	 * the extent that was written properly split out and conversion to
+	 * initialized is trivial.
 	 */
-	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
+	if (ext4_ext_is_uninitialized(ex1) || ext4_ext_is_uninitialized(ex2))
 		return 0;
 
 	if (ext4_ext_is_uninitialized(ex1))
@@ -2923,7 +2925,7 @@ static int ext4_split_extent_at(handle_t *handle,
 {
 	ext4_fsblk_t newblock;
 	ext4_lblk_t ee_block;
-	struct ext4_extent *ex, newex, orig_ex;
+	struct ext4_extent *ex, newex, orig_ex, zero_ex;
 	struct ext4_extent *ex2 = NULL;
 	unsigned int ee_len, depth;
 	int err = 0;
@@ -2943,6 +2945,10 @@ static int ext4_split_extent_at(handle_t *handle,
 	newblock = split - ee_block + ext4_ext_pblock(ex);
 
 	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
+	BUG_ON(!ext4_ext_is_uninitialized(ex) &&
+	       split_flag & (EXT4_EXT_MAY_ZEROOUT |
+			     EXT4_EXT_MARK_UNINIT1 |
+			     EXT4_EXT_MARK_UNINIT2));
 
 	err = ext4_ext_get_access(handle, inode, path + depth);
 	if (err)
@@ -2990,12 +2996,26 @@ static int ext4_split_extent_at(handle_t *handle,
 	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
 	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
 		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
-			if (split_flag & EXT4_EXT_DATA_VALID1)
+			if (split_flag & EXT4_EXT_DATA_VALID1) {
 				err = ext4_ext_zeroout(inode, ex2);
-			else
+				zero_ex.ee_block = ex2->ee_block;
+				zero_ex.ee_len = ext4_ext_get_actual_len(ex2);
+				ext4_ext_store_pblock(&zero_ex,
+						      ext4_ext_pblock(ex2));
+			} else {
 				err = ext4_ext_zeroout(inode, ex);
-		} else
+				zero_ex.ee_block = ex->ee_block;
+				zero_ex.ee_len = ext4_ext_get_actual_len(ex);
+				ext4_ext_store_pblock(&zero_ex,
+						      ext4_ext_pblock(ex));
+			}
+		} else {
 			err = ext4_ext_zeroout(inode, &orig_ex);
+			zero_ex.ee_block = orig_ex.ee_block;
+			zero_ex.ee_len = ext4_ext_get_actual_len(&orig_ex);
+			ext4_ext_store_pblock(&zero_ex,
+					      ext4_ext_pblock(&orig_ex));
+		}
 
 		if (err)
 			goto fix_extent_len;
@@ -3003,6 +3023,12 @@ static int ext4_split_extent_at(handle_t *handle,
 		ex->ee_len = cpu_to_le16(ee_len);
 		ext4_ext_try_to_merge(handle, inode, path, ex);
 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+		if (err)
+			goto fix_extent_len;
+
+		/* update extent status tree */
+		err = ext4_es_zeroout(inode, &zero_ex);
+
 		goto out;
 	} else if (err)
 		goto fix_extent_len;
@@ -3041,6 +3067,7 @@ static int ext4_split_extent(handle_t *handle,
 	int err = 0;
 	int uninitialized;
 	int split_flag1, flags1;
+	int allocated = map->m_len;
 
 	depth = ext_depth(inode);
 	ex = path[depth].p_ext;
@@ -3060,20 +3087,29 @@ static int ext4_split_extent(handle_t *handle,
 				map->m_lblk + map->m_len, split_flag1, flags1);
 		if (err)
 			goto out;
+	} else {
+		allocated = ee_len - (map->m_lblk - ee_block);
 	}
-
+	/*
+	 * Update path is required because previous ext4_split_extent_at() may
+	 * result in split of original leaf or extent zeroout.
+	 */
 	ext4_ext_drop_refs(path);
 	path = ext4_ext_find_extent(inode, map->m_lblk, path);
 	if (IS_ERR(path))
 		return PTR_ERR(path);
+	depth = ext_depth(inode);
+	ex = path[depth].p_ext;
+	uninitialized = ext4_ext_is_uninitialized(ex);
+	split_flag1 = 0;
 
 	if (map->m_lblk >= ee_block) {
-		split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
-					    EXT4_EXT_DATA_VALID2);
-		if (uninitialized)
+		split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
+		if (uninitialized) {
 			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
-		if (split_flag & EXT4_EXT_MARK_UNINIT2)
-			split_flag1 |= EXT4_EXT_MARK_UNINIT2;
+			split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
+						     EXT4_EXT_MARK_UNINIT2);
+		}
 		err = ext4_split_extent_at(handle, inode, path,
 				map->m_lblk, split_flag1, flags);
 		if (err)
@@ -3082,7 +3118,7 @@ static int ext4_split_extent(handle_t *handle,
 
 	ext4_ext_show_leaf(inode, path);
 out:
-	return err ? err : map->m_len;
+	return err ? err : allocated;
 }
 
 /*
@@ -3137,6 +3173,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 	ee_block = le32_to_cpu(ex->ee_block);
 	ee_len = ext4_ext_get_actual_len(ex);
 	allocated = ee_len - (map->m_lblk - ee_block);
+	zero_ex.ee_len = 0;
 
 	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
 
@@ -3227,13 +3264,16 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 
 	if (EXT4_EXT_MAY_ZEROOUT & split_flag)
 		max_zeroout = sbi->s_extent_max_zeroout_kb >>
-			inode->i_sb->s_blocksize_bits;
+			(inode->i_sb->s_blocksize_bits - 10);
 
 	/* If extent is less than s_max_zeroout_kb, zeroout directly */
 	if (max_zeroout && (ee_len <= max_zeroout)) {
 		err = ext4_ext_zeroout(inode, ex);
 		if (err)
 			goto out;
+		zero_ex.ee_block = ex->ee_block;
+		zero_ex.ee_len = ext4_ext_get_actual_len(ex);
+		ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
 
 		err = ext4_ext_get_access(handle, inode, path + depth);
 		if (err)
@@ -3292,6 +3332,9 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 		err = allocated;
 
 out:
+	/* If we have gotten a failure, don't zero out status tree */
+	if (!err)
+		err = ext4_es_zeroout(inode, &zero_ex);
 	return err ? err : allocated;
 }
 
@@ -3374,8 +3417,19 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3374 "block %llu, max_blocks %u\n", inode->i_ino, 3417 "block %llu, max_blocks %u\n", inode->i_ino,
3375 (unsigned long long)ee_block, ee_len); 3418 (unsigned long long)ee_block, ee_len);
3376 3419
3377 /* If extent is larger than requested then split is required */ 3420 /* If extent is larger than requested it is a clear sign that we still
3421 * have some extent state machine issues left. So extent_split is still
3422 * required.
3423 * TODO: Once all related issues will be fixed this situation should be
3424 * illegal.
3425 */
3378 if (ee_block != map->m_lblk || ee_len > map->m_len) { 3426 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3427#ifdef EXT4_DEBUG
3428 ext4_warning("Inode (%ld) finished: extent logical block %llu,"
3429 " len %u; IO logical block %llu, len %u\n",
3430 inode->i_ino, (unsigned long long)ee_block, ee_len,
3431 (unsigned long long)map->m_lblk, map->m_len);
3432#endif
3379 err = ext4_split_unwritten_extents(handle, inode, map, path, 3433 err = ext4_split_unwritten_extents(handle, inode, map, path,
3380 EXT4_GET_BLOCKS_CONVERT); 3434 EXT4_GET_BLOCKS_CONVERT);
3381 if (err < 0) 3435 if (err < 0)
@@ -3626,6 +3680,10 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 							path, map->m_len);
 		} else
 			err = ret;
+		map->m_flags |= EXT4_MAP_MAPPED;
+		if (allocated > map->m_len)
+			allocated = map->m_len;
+		map->m_len = allocated;
 		goto out2;
 	}
 	/* buffered IO case */
@@ -3675,6 +3733,7 @@ out:
3675 allocated - map->m_len); 3733 allocated - map->m_len);
3676 allocated = map->m_len; 3734 allocated = map->m_len;
3677 } 3735 }
3736 map->m_len = allocated;
3678 3737
3679 /* 3738 /*
3680 * If we have done fallocate with the offset that is already 3739 * If we have done fallocate with the offset that is already
@@ -4106,9 +4165,6 @@ got_allocated_blocks:
4106 } 4165 }
4107 } else { 4166 } else {
4108 BUG_ON(allocated_clusters < reserved_clusters); 4167 BUG_ON(allocated_clusters < reserved_clusters);
4109 /* We will claim quota for all newly allocated blocks.*/
4110 ext4_da_update_reserve_space(inode, allocated_clusters,
4111 1);
4112 if (reserved_clusters < allocated_clusters) { 4168 if (reserved_clusters < allocated_clusters) {
4113 struct ext4_inode_info *ei = EXT4_I(inode); 4169 struct ext4_inode_info *ei = EXT4_I(inode);
4114 int reservation = allocated_clusters - 4170 int reservation = allocated_clusters -
@@ -4159,6 +4215,15 @@ got_allocated_blocks:
4159 ei->i_reserved_data_blocks += reservation; 4215 ei->i_reserved_data_blocks += reservation;
4160 spin_unlock(&ei->i_block_reservation_lock); 4216 spin_unlock(&ei->i_block_reservation_lock);
4161 } 4217 }
4218 /*
4219 * We will claim quota for all newly allocated blocks.
4220 * We're updating the reserved space *after* the
4221 * correction above so we do not accidentally free
4222 * all the metadata reservation because we might
4223 * actually need it later on.
4224 */
4225 ext4_da_update_reserve_space(inode, allocated_clusters,
4226 1);
4162 } 4227 }
4163 } 4228 }
4164 4229
@@ -4368,8 +4433,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4368 if (len <= EXT_UNINIT_MAX_LEN << blkbits) 4433 if (len <= EXT_UNINIT_MAX_LEN << blkbits)
4369 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; 4434 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4370 4435
4371 /* Prevent race condition between unwritten */
4372 ext4_flush_unwritten_io(inode);
4373retry: 4436retry:
4374 while (ret >= 0 && ret < max_blocks) { 4437 while (ret >= 0 && ret < max_blocks) {
4375 map.m_lblk = map.m_lblk + ret; 4438 map.m_lblk = map.m_lblk + ret;
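
The max_zeroout change in the extents.c hunks above fixes a unit mismatch: s_extent_max_zeroout_kb is a limit in KiB, and shifting it by s_blocksize_bits alone divides by the block size in bytes, truncating any reasonable limit to zero. A minimal user-space sketch of the corrected arithmetic (values are illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int max_kb = 32;          /* zeroout limit in KiB */
        unsigned int blocksize_bits = 12;  /* 4096-byte blocks */

        /* old conversion: 32 >> 12 == 0, the fast path never fires */
        unsigned int wrong = max_kb >> blocksize_bits;
        /* fixed: a block is 2^(bits - 10) KiB, so 32 KiB == 8 blocks */
        unsigned int right = max_kb >> (blocksize_bits - 10);

        printf("wrong=%u blocks, right=%u blocks\n", wrong, right);
        return 0;
    }
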
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 95796a1b7522..fe3337a85ede 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -333,17 +333,27 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
333static int ext4_es_can_be_merged(struct extent_status *es1, 333static int ext4_es_can_be_merged(struct extent_status *es1,
334 struct extent_status *es2) 334 struct extent_status *es2)
335{ 335{
336 if (es1->es_lblk + es1->es_len != es2->es_lblk) 336 if (ext4_es_status(es1) != ext4_es_status(es2))
337 return 0; 337 return 0;
338 338
339 if (ext4_es_status(es1) != ext4_es_status(es2)) 339 if (((__u64) es1->es_len) + es2->es_len > 0xFFFFFFFFULL)
340 return 0; 340 return 0;
341 341
342 if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) && 342 if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
343 (ext4_es_pblock(es1) + es1->es_len != ext4_es_pblock(es2)))
344 return 0; 343 return 0;
345 344
346 return 1; 345 if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
346 (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
347 return 1;
348
349 if (ext4_es_is_hole(es1))
350 return 1;
351
 352 /* we need to check that a delayed extent is without unwritten status */
353 if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
354 return 1;
355
356 return 0;
347} 357}
348 358
349static struct extent_status * 359static struct extent_status *
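
The reworked ext4_es_can_be_merged() above also gains an overflow guard: es_len is a 32-bit field, so two extents may only be merged when their combined length still fits. A small user-space sketch of that check:

    #include <stdint.h>
    #include <stdio.h>

    /* refuse a merge whose combined length would overflow 32 bits */
    static int len_fits(uint32_t len1, uint32_t len2)
    {
        return (uint64_t)len1 + len2 <= 0xFFFFFFFFULL;
    }

    int main(void)
    {
        printf("%d\n", len_fits(0x80000000u, 0x7FFFFFFFu)); /* 1: fits */
        printf("%d\n", len_fits(0x80000000u, 0x80000000u)); /* 0: overflows */
        return 0;
    }
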
@@ -389,6 +399,179 @@ ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
389 return es; 399 return es;
390} 400}
391 401
402#ifdef ES_AGGRESSIVE_TEST
403static void ext4_es_insert_extent_ext_check(struct inode *inode,
404 struct extent_status *es)
405{
406 struct ext4_ext_path *path = NULL;
407 struct ext4_extent *ex;
408 ext4_lblk_t ee_block;
409 ext4_fsblk_t ee_start;
410 unsigned short ee_len;
411 int depth, ee_status, es_status;
412
413 path = ext4_ext_find_extent(inode, es->es_lblk, NULL);
414 if (IS_ERR(path))
415 return;
416
417 depth = ext_depth(inode);
418 ex = path[depth].p_ext;
419
420 if (ex) {
421
422 ee_block = le32_to_cpu(ex->ee_block);
423 ee_start = ext4_ext_pblock(ex);
424 ee_len = ext4_ext_get_actual_len(ex);
425
426 ee_status = ext4_ext_is_uninitialized(ex) ? 1 : 0;
427 es_status = ext4_es_is_unwritten(es) ? 1 : 0;
428
429 /*
 430 * Make sure ex and es do not overlap when we try to insert
431 * a delayed/hole extent.
432 */
433 if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
434 if (in_range(es->es_lblk, ee_block, ee_len)) {
 435 pr_warn("ES insert assertion failed for "
436 "inode: %lu we can find an extent "
437 "at block [%d/%d/%llu/%c], but we "
 438 "want to add a delayed/hole extent "
439 "[%d/%d/%llu/%llx]\n",
440 inode->i_ino, ee_block, ee_len,
441 ee_start, ee_status ? 'u' : 'w',
442 es->es_lblk, es->es_len,
443 ext4_es_pblock(es), ext4_es_status(es));
444 }
445 goto out;
446 }
447
448 /*
449 * We don't check ee_block == es->es_lblk, etc. because es
 450 * might be part of the whole extent, and vice versa.
451 */
452 if (es->es_lblk < ee_block ||
453 ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
 454 pr_warn("ES insert assertion failed for inode: %lu "
455 "ex_status [%d/%d/%llu/%c] != "
456 "es_status [%d/%d/%llu/%c]\n", inode->i_ino,
457 ee_block, ee_len, ee_start,
458 ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
459 ext4_es_pblock(es), es_status ? 'u' : 'w');
460 goto out;
461 }
462
463 if (ee_status ^ es_status) {
 464 pr_warn("ES insert assertion failed for inode: %lu "
465 "ex_status [%d/%d/%llu/%c] != "
466 "es_status [%d/%d/%llu/%c]\n", inode->i_ino,
467 ee_block, ee_len, ee_start,
468 ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
469 ext4_es_pblock(es), es_status ? 'u' : 'w');
470 }
471 } else {
472 /*
473 * We can't find an extent on disk. So we need to make sure
 474 * that we don't want to add a written/unwritten extent.
475 */
476 if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
 477 pr_warn("ES insert assertion failed for inode: %lu "
478 "can't find an extent at block %d but we want "
 479 "to add a written/unwritten extent "
480 "[%d/%d/%llu/%llx]\n", inode->i_ino,
481 es->es_lblk, es->es_lblk, es->es_len,
482 ext4_es_pblock(es), ext4_es_status(es));
483 }
484 }
485out:
486 if (path) {
487 ext4_ext_drop_refs(path);
488 kfree(path);
489 }
490}
491
492static void ext4_es_insert_extent_ind_check(struct inode *inode,
493 struct extent_status *es)
494{
495 struct ext4_map_blocks map;
496 int retval;
497
498 /*
 499 * Here we call ext4_ind_map_blocks to look up a block mapping because
 500 * the 'Indirect' structure is defined in indirect.c, so we can't
 501 * access the direct/indirect tree from outside it. It is too ugly to
 502 * define this function in indirect.c.
503 */
504
505 map.m_lblk = es->es_lblk;
506 map.m_len = es->es_len;
507
508 retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
509 if (retval > 0) {
510 if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
511 /*
512 * We want to add a delayed/hole extent but this
513 * block has been allocated.
514 */
 515 pr_warn("ES insert assertion failed for inode: %lu "
516 "We can find blocks but we want to add a "
517 "delayed/hole extent [%d/%d/%llu/%llx]\n",
518 inode->i_ino, es->es_lblk, es->es_len,
519 ext4_es_pblock(es), ext4_es_status(es));
520 return;
521 } else if (ext4_es_is_written(es)) {
522 if (retval != es->es_len) {
 523 pr_warn("ES insert assertion failed for "
524 "inode: %lu retval %d != es_len %d\n",
525 inode->i_ino, retval, es->es_len);
526 return;
527 }
528 if (map.m_pblk != ext4_es_pblock(es)) {
 529 pr_warn("ES insert assertion failed for "
530 "inode: %lu m_pblk %llu != "
531 "es_pblk %llu\n",
532 inode->i_ino, map.m_pblk,
533 ext4_es_pblock(es));
534 return;
535 }
536 } else {
537 /*
 538 * We don't need to check the unwritten extent because an
 539 * indirect-based file doesn't have it.
540 */
541 BUG_ON(1);
542 }
543 } else if (retval == 0) {
544 if (ext4_es_is_written(es)) {
 545 pr_warn("ES insert assertion failed for inode: %lu "
546 "We can't find the block but we want to add "
 547 "a written extent [%d/%d/%llu/%llx]\n",
548 inode->i_ino, es->es_lblk, es->es_len,
549 ext4_es_pblock(es), ext4_es_status(es));
550 return;
551 }
552 }
553}
554
555static inline void ext4_es_insert_extent_check(struct inode *inode,
556 struct extent_status *es)
557{
558 /*
559 * We don't need to worry about the race condition because
 560 * the caller holds i_data_sem.
561 */
562 BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
563 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
564 ext4_es_insert_extent_ext_check(inode, es);
565 else
566 ext4_es_insert_extent_ind_check(inode, es);
567}
568#else
569static inline void ext4_es_insert_extent_check(struct inode *inode,
570 struct extent_status *es)
571{
572}
573#endif
574
392static int __es_insert_extent(struct inode *inode, struct extent_status *newes) 575static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
393{ 576{
394 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; 577 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
@@ -471,6 +654,8 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
471 ext4_es_store_status(&newes, status); 654 ext4_es_store_status(&newes, status);
472 trace_ext4_es_insert_extent(inode, &newes); 655 trace_ext4_es_insert_extent(inode, &newes);
473 656
657 ext4_es_insert_extent_check(inode, &newes);
658
474 write_lock(&EXT4_I(inode)->i_es_lock); 659 write_lock(&EXT4_I(inode)->i_es_lock);
475 err = __es_remove_extent(inode, lblk, end); 660 err = __es_remove_extent(inode, lblk, end);
476 if (err != 0) 661 if (err != 0)
@@ -669,6 +854,23 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
669 return err; 854 return err;
670} 855}
671 856
857int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex)
858{
859 ext4_lblk_t ee_block;
860 ext4_fsblk_t ee_pblock;
861 unsigned int ee_len;
862
863 ee_block = le32_to_cpu(ex->ee_block);
864 ee_len = ext4_ext_get_actual_len(ex);
865 ee_pblock = ext4_ext_pblock(ex);
866
867 if (ee_len == 0)
868 return 0;
869
870 return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
871 EXTENT_STATUS_WRITTEN);
872}
873
672static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) 874static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
673{ 875{
674 struct ext4_sb_info *sbi = container_of(shrink, 876 struct ext4_sb_info *sbi = container_of(shrink,
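
Note that ext4_es_zeroout() treats a zero-length extent as a no-op. Together with the zero_ex.ee_len = 0 initialization added to ext4_ext_convert_to_initialized() above, this lets a single call at the out: label cover every exit path. A kernel-context sketch of the convention (not a standalone program):

    struct ext4_extent zero_ex;
    int err = 0;

    zero_ex.ee_len = 0;  /* nothing has been zeroed out yet */
    /* ... fill in ee_block/ee_len/pblock only if a zeroout happens ... */
    if (!err)
        err = ext4_es_zeroout(inode, &zero_ex); /* no-op when ee_len == 0 */
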
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index f190dfe969da..d8e2d4dc311e 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -21,6 +21,12 @@
21#endif 21#endif
22 22
23/* 23/*
24 * With ES_AGGRESSIVE_TEST defined, the result of es caching will be
 25 * checked against the old map_blocks result.
26 */
27#define ES_AGGRESSIVE_TEST__
28
29/*
24 * These flags live in the high bits of extent_status.es_pblk 30 * These flags live in the high bits of extent_status.es_pblk
25 */ 31 */
26#define EXTENT_STATUS_WRITTEN (1ULL << 63) 32#define EXTENT_STATUS_WRITTEN (1ULL << 63)
@@ -33,6 +39,8 @@
33 EXTENT_STATUS_DELAYED | \ 39 EXTENT_STATUS_DELAYED | \
34 EXTENT_STATUS_HOLE) 40 EXTENT_STATUS_HOLE)
35 41
42struct ext4_extent;
43
36struct extent_status { 44struct extent_status {
37 struct rb_node rb_node; 45 struct rb_node rb_node;
38 ext4_lblk_t es_lblk; /* first logical block extent covers */ 46 ext4_lblk_t es_lblk; /* first logical block extent covers */
@@ -58,6 +66,7 @@ extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
58 struct extent_status *es); 66 struct extent_status *es);
59extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, 67extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
60 struct extent_status *es); 68 struct extent_status *es);
69extern int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex);
61 70
62static inline int ext4_es_is_written(struct extent_status *es) 71static inline int ext4_es_is_written(struct extent_status *es)
63{ 72{
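
The trailing underscores in ES_AGGRESSIVE_TEST__ are deliberate: the name does not match the #ifdef ES_AGGRESSIVE_TEST blocks, so the self-checks compile out by default. Enabling them would presumably amount to editing the define down to the bare name:

    #define ES_AGGRESSIVE_TEST
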
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 32fd2b9075dd..6c5bb8d993fe 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -324,8 +324,8 @@ error_return:
324} 324}
325 325
326struct orlov_stats { 326struct orlov_stats {
327 __u64 free_clusters;
327 __u32 free_inodes; 328 __u32 free_inodes;
328 __u32 free_clusters;
329 __u32 used_dirs; 329 __u32 used_dirs;
330}; 330};
331 331
@@ -342,7 +342,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
342 342
343 if (flex_size > 1) { 343 if (flex_size > 1) {
344 stats->free_inodes = atomic_read(&flex_group[g].free_inodes); 344 stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
345 stats->free_clusters = atomic_read(&flex_group[g].free_clusters); 345 stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
346 stats->used_dirs = atomic_read(&flex_group[g].used_dirs); 346 stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
347 return; 347 return;
348 } 348 }
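
This hunk, together with the mballoc.c, resize.c, and super.c hunks below, widens the per-flex-group free_clusters counter from atomic_t to atomic64_t, since on a large filesystem the count can exceed what 32 bits hold. A user-space illustration of the wraparound the wider type avoids:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t free32 = UINT32_MAX - 15;  /* near the 32-bit limit */
        uint64_t free64 = UINT32_MAX - 15;

        free32 += 32;  /* wraps: suddenly reports almost nothing free */
        free64 += 32;  /* stays correct */

        printf("32-bit: %u\n64-bit: %llu\n",
               free32, (unsigned long long)free64);
        return 0;
    }
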
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9ea0cde3fa9e..b3a5213bc73e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -185,8 +185,6 @@ void ext4_evict_inode(struct inode *inode)
185 185
186 trace_ext4_evict_inode(inode); 186 trace_ext4_evict_inode(inode);
187 187
188 ext4_ioend_wait(inode);
189
190 if (inode->i_nlink) { 188 if (inode->i_nlink) {
191 /* 189 /*
192 * When journalling data dirty buffers are tracked only in the 190 * When journalling data dirty buffers are tracked only in the
@@ -207,7 +205,8 @@ void ext4_evict_inode(struct inode *inode)
207 * don't use page cache. 205 * don't use page cache.
208 */ 206 */
209 if (ext4_should_journal_data(inode) && 207 if (ext4_should_journal_data(inode) &&
210 (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) { 208 (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
209 inode->i_ino != EXT4_JOURNAL_INO) {
211 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 210 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
212 tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; 211 tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
213 212
@@ -216,6 +215,7 @@ void ext4_evict_inode(struct inode *inode)
216 filemap_write_and_wait(&inode->i_data); 215 filemap_write_and_wait(&inode->i_data);
217 } 216 }
218 truncate_inode_pages(&inode->i_data, 0); 217 truncate_inode_pages(&inode->i_data, 0);
218 ext4_ioend_shutdown(inode);
219 goto no_delete; 219 goto no_delete;
220 } 220 }
221 221
@@ -225,6 +225,7 @@ void ext4_evict_inode(struct inode *inode)
225 if (ext4_should_order_data(inode)) 225 if (ext4_should_order_data(inode))
226 ext4_begin_ordered_truncate(inode, 0); 226 ext4_begin_ordered_truncate(inode, 0);
227 truncate_inode_pages(&inode->i_data, 0); 227 truncate_inode_pages(&inode->i_data, 0);
228 ext4_ioend_shutdown(inode);
228 229
229 if (is_bad_inode(inode)) 230 if (is_bad_inode(inode))
230 goto no_delete; 231 goto no_delete;
@@ -482,6 +483,58 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
482 return num; 483 return num;
483} 484}
484 485
486#ifdef ES_AGGRESSIVE_TEST
487static void ext4_map_blocks_es_recheck(handle_t *handle,
488 struct inode *inode,
489 struct ext4_map_blocks *es_map,
490 struct ext4_map_blocks *map,
491 int flags)
492{
493 int retval;
494
495 map->m_flags = 0;
496 /*
 497 * There is a race window in which the result is not the same,
 498 * e.g. xfstests #223 when dioread_nolock is enabled. The reason
 499 * is that we look up a block mapping in the extent status tree
 500 * without taking i_data_sem, so in the meantime the unwritten
 501 * extent could be converted.
502 */
503 if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
504 down_read((&EXT4_I(inode)->i_data_sem));
505 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
506 retval = ext4_ext_map_blocks(handle, inode, map, flags &
507 EXT4_GET_BLOCKS_KEEP_SIZE);
508 } else {
509 retval = ext4_ind_map_blocks(handle, inode, map, flags &
510 EXT4_GET_BLOCKS_KEEP_SIZE);
511 }
512 if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
513 up_read((&EXT4_I(inode)->i_data_sem));
514 /*
 515 * Clear the EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flags
 516 * because they shouldn't be marked in es_map->m_flags.
517 */
518 map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY);
519
520 /*
 521 * We don't check m_len because extents will be collapsed in the
 522 * status tree, so the lengths might not be equal.
523 */
524 if (es_map->m_lblk != map->m_lblk ||
525 es_map->m_flags != map->m_flags ||
526 es_map->m_pblk != map->m_pblk) {
 527 printk("ES cache assertion failed for inode: %lu "
528 "es_cached ex [%d/%d/%llu/%x] != "
529 "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
530 inode->i_ino, es_map->m_lblk, es_map->m_len,
531 es_map->m_pblk, es_map->m_flags, map->m_lblk,
532 map->m_len, map->m_pblk, map->m_flags,
533 retval, flags);
534 }
535}
536#endif /* ES_AGGRESSIVE_TEST */
537
485/* 538/*
486 * The ext4_map_blocks() function tries to look up the requested blocks, 539 * The ext4_map_blocks() function tries to look up the requested blocks,
487 * and returns if the blocks are already mapped. 540 * and returns if the blocks are already mapped.
@@ -509,6 +562,11 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
509{ 562{
510 struct extent_status es; 563 struct extent_status es;
511 int retval; 564 int retval;
565#ifdef ES_AGGRESSIVE_TEST
566 struct ext4_map_blocks orig_map;
567
568 memcpy(&orig_map, map, sizeof(*map));
569#endif
512 570
513 map->m_flags = 0; 571 map->m_flags = 0;
514 ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u," 572 ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
@@ -531,6 +589,10 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
531 } else { 589 } else {
532 BUG_ON(1); 590 BUG_ON(1);
533 } 591 }
592#ifdef ES_AGGRESSIVE_TEST
593 ext4_map_blocks_es_recheck(handle, inode, map,
594 &orig_map, flags);
595#endif
534 goto found; 596 goto found;
535 } 597 }
536 598
@@ -551,6 +613,15 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
551 int ret; 613 int ret;
552 unsigned long long status; 614 unsigned long long status;
553 615
616#ifdef ES_AGGRESSIVE_TEST
617 if (retval != map->m_len) {
 618 printk("ES len assertion failed for inode: %lu "
619 "retval %d != map->m_len %d "
620 "in %s (lookup)\n", inode->i_ino, retval,
621 map->m_len, __func__);
622 }
623#endif
624
554 status = map->m_flags & EXT4_MAP_UNWRITTEN ? 625 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
555 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 626 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
556 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && 627 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
@@ -643,6 +714,24 @@ found:
643 int ret; 714 int ret;
644 unsigned long long status; 715 unsigned long long status;
645 716
717#ifdef ES_AGGRESSIVE_TEST
718 if (retval != map->m_len) {
 719 printk("ES len assertion failed for inode: %lu "
720 "retval %d != map->m_len %d "
721 "in %s (allocation)\n", inode->i_ino, retval,
722 map->m_len, __func__);
723 }
724#endif
725
726 /*
727 * If the extent has been zeroed out, we don't need to update
 728 * the extent status tree.
729 */
730 if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
731 ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
732 if (ext4_es_is_written(&es))
733 goto has_zeroout;
734 }
646 status = map->m_flags & EXT4_MAP_UNWRITTEN ? 735 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
647 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 736 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
648 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && 737 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
@@ -655,6 +744,7 @@ found:
655 retval = ret; 744 retval = ret;
656 } 745 }
657 746
747has_zeroout:
658 up_write((&EXT4_I(inode)->i_data_sem)); 748 up_write((&EXT4_I(inode)->i_data_sem));
659 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { 749 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
660 int ret = check_block_validity(inode, map); 750 int ret = check_block_validity(inode, map);
@@ -1216,6 +1306,55 @@ static int ext4_journalled_write_end(struct file *file,
1216} 1306}
1217 1307
1218/* 1308/*
 1309 * Reserve metadata for a single block located at lblock
1310 */
1311static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
1312{
1313 int retries = 0;
1314 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1315 struct ext4_inode_info *ei = EXT4_I(inode);
1316 unsigned int md_needed;
1317 ext4_lblk_t save_last_lblock;
1318 int save_len;
1319
1320 /*
 1321 * recalculate the number of metadata blocks to reserve
 1322 * in order to allocate nrblocks;
 1323 * the worst case is one extent per block
1324 */
1325repeat:
1326 spin_lock(&ei->i_block_reservation_lock);
1327 /*
1328 * ext4_calc_metadata_amount() has side effects, which we have
 1329 * to be prepared to undo if we fail to claim space.
1330 */
1331 save_len = ei->i_da_metadata_calc_len;
1332 save_last_lblock = ei->i_da_metadata_calc_last_lblock;
1333 md_needed = EXT4_NUM_B2C(sbi,
1334 ext4_calc_metadata_amount(inode, lblock));
1335 trace_ext4_da_reserve_space(inode, md_needed);
1336
1337 /*
1338 * We do still charge estimated metadata to the sb though;
1339 * we cannot afford to run out of free blocks.
1340 */
1341 if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
1342 ei->i_da_metadata_calc_len = save_len;
1343 ei->i_da_metadata_calc_last_lblock = save_last_lblock;
1344 spin_unlock(&ei->i_block_reservation_lock);
1345 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1346 cond_resched();
1347 goto repeat;
1348 }
1349 return -ENOSPC;
1350 }
1351 ei->i_reserved_meta_blocks += md_needed;
1352 spin_unlock(&ei->i_block_reservation_lock);
1353
1354 return 0; /* success */
1355}
1356
1357/*
1219 * Reserve a single cluster located at lblock 1358 * Reserve a single cluster located at lblock
1220 */ 1359 */
1221static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) 1360static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
@@ -1263,7 +1402,7 @@ repeat:
1263 ei->i_da_metadata_calc_last_lblock = save_last_lblock; 1402 ei->i_da_metadata_calc_last_lblock = save_last_lblock;
1264 spin_unlock(&ei->i_block_reservation_lock); 1403 spin_unlock(&ei->i_block_reservation_lock);
1265 if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1404 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1266 yield(); 1405 cond_resched();
1267 goto repeat; 1406 goto repeat;
1268 } 1407 }
1269 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); 1408 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
@@ -1768,6 +1907,11 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1768 struct extent_status es; 1907 struct extent_status es;
1769 int retval; 1908 int retval;
1770 sector_t invalid_block = ~((sector_t) 0xffff); 1909 sector_t invalid_block = ~((sector_t) 0xffff);
1910#ifdef ES_AGGRESSIVE_TEST
1911 struct ext4_map_blocks orig_map;
1912
1913 memcpy(&orig_map, map, sizeof(*map));
1914#endif
1771 1915
1772 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) 1916 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1773 invalid_block = ~0; 1917 invalid_block = ~0;
@@ -1809,6 +1953,9 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1809 else 1953 else
1810 BUG_ON(1); 1954 BUG_ON(1);
1811 1955
1956#ifdef ES_AGGRESSIVE_TEST
1957 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1958#endif
1812 return retval; 1959 return retval;
1813 } 1960 }
1814 1961
@@ -1843,8 +1990,11 @@ add_delayed:
1843 * XXX: __block_prepare_write() unmaps passed block, 1990 * XXX: __block_prepare_write() unmaps passed block,
1844 * is it OK? 1991 * is it OK?
1845 */ 1992 */
1846 /* If the block was allocated from previously allocated cluster, 1993 /*
 1847 * then we dont need to reserve it again. */ 1994 * If the block was allocated from a previously allocated cluster,
 1995 * then we don't need to reserve it again. However, we still need
1996 * to reserve metadata for every block we're going to write.
1997 */
1848 if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) { 1998 if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
1849 ret = ext4_da_reserve_space(inode, iblock); 1999 ret = ext4_da_reserve_space(inode, iblock);
1850 if (ret) { 2000 if (ret) {
@@ -1852,6 +2002,13 @@ add_delayed:
1852 retval = ret; 2002 retval = ret;
1853 goto out_unlock; 2003 goto out_unlock;
1854 } 2004 }
2005 } else {
2006 ret = ext4_da_reserve_metadata(inode, iblock);
2007 if (ret) {
2008 /* not enough space to reserve */
2009 retval = ret;
2010 goto out_unlock;
2011 }
1855 } 2012 }
1856 2013
1857 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, 2014 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
@@ -1873,6 +2030,15 @@ add_delayed:
1873 int ret; 2030 int ret;
1874 unsigned long long status; 2031 unsigned long long status;
1875 2032
2033#ifdef ES_AGGRESSIVE_TEST
2034 if (retval != map->m_len) {
 2035 printk("ES len assertion failed for inode: %lu "
2036 "retval %d != map->m_len %d "
2037 "in %s (lookup)\n", inode->i_ino, retval,
2038 map->m_len, __func__);
2039 }
2040#endif
2041
1876 status = map->m_flags & EXT4_MAP_UNWRITTEN ? 2042 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1877 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 2043 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1878 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, 2044 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
@@ -2908,8 +3074,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
2908 3074
2909 trace_ext4_releasepage(page); 3075 trace_ext4_releasepage(page);
2910 3076
2911 WARN_ON(PageChecked(page)); 3077 /* Page has dirty journalled data -> cannot release */
2912 if (!page_has_buffers(page)) 3078 if (PageChecked(page))
2913 return 0; 3079 return 0;
2914 if (journal) 3080 if (journal)
2915 return jbd2_journal_try_to_free_buffers(journal, page, wait); 3081 return jbd2_journal_try_to_free_buffers(journal, page, wait);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 7bb713a46fe4..ee6614bdb639 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2804,8 +2804,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2804 if (sbi->s_log_groups_per_flex) { 2804 if (sbi->s_log_groups_per_flex) {
2805 ext4_group_t flex_group = ext4_flex_group(sbi, 2805 ext4_group_t flex_group = ext4_flex_group(sbi,
2806 ac->ac_b_ex.fe_group); 2806 ac->ac_b_ex.fe_group);
2807 atomic_sub(ac->ac_b_ex.fe_len, 2807 atomic64_sub(ac->ac_b_ex.fe_len,
2808 &sbi->s_flex_groups[flex_group].free_clusters); 2808 &sbi->s_flex_groups[flex_group].free_clusters);
2809 } 2809 }
2810 2810
2811 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 2811 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
@@ -3692,11 +3692,7 @@ repeat:
3692 if (free < needed && busy) { 3692 if (free < needed && busy) {
3693 busy = 0; 3693 busy = 0;
3694 ext4_unlock_group(sb, group); 3694 ext4_unlock_group(sb, group);
3695 /* 3695 cond_resched();
3696 * Yield the CPU here so that we don't get soft lockup
3697 * in non preempt case.
3698 */
3699 yield();
3700 goto repeat; 3696 goto repeat;
3701 } 3697 }
3702 3698
@@ -4246,7 +4242,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4246 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 4242 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
4247 4243
 4248 /* let others free the space */ 4244 /* let others free the space */
4249 yield(); 4245 cond_resched();
4250 ar->len = ar->len >> 1; 4246 ar->len = ar->len >> 1;
4251 } 4247 }
4252 if (!ar->len) { 4248 if (!ar->len) {
@@ -4464,7 +4460,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
4464 struct buffer_head *bitmap_bh = NULL; 4460 struct buffer_head *bitmap_bh = NULL;
4465 struct super_block *sb = inode->i_sb; 4461 struct super_block *sb = inode->i_sb;
4466 struct ext4_group_desc *gdp; 4462 struct ext4_group_desc *gdp;
4467 unsigned long freed = 0;
4468 unsigned int overflow; 4463 unsigned int overflow;
4469 ext4_grpblk_t bit; 4464 ext4_grpblk_t bit;
4470 struct buffer_head *gd_bh; 4465 struct buffer_head *gd_bh;
@@ -4666,14 +4661,12 @@ do_more:
4666 4661
4667 if (sbi->s_log_groups_per_flex) { 4662 if (sbi->s_log_groups_per_flex) {
4668 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 4663 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4669 atomic_add(count_clusters, 4664 atomic64_add(count_clusters,
4670 &sbi->s_flex_groups[flex_group].free_clusters); 4665 &sbi->s_flex_groups[flex_group].free_clusters);
4671 } 4666 }
4672 4667
4673 ext4_mb_unload_buddy(&e4b); 4668 ext4_mb_unload_buddy(&e4b);
4674 4669
4675 freed += count;
4676
4677 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 4670 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
4678 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 4671 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
4679 4672
@@ -4811,8 +4804,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4811 4804
4812 if (sbi->s_log_groups_per_flex) { 4805 if (sbi->s_log_groups_per_flex) {
4813 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 4806 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4814 atomic_add(EXT4_NUM_B2C(sbi, blocks_freed), 4807 atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
4815 &sbi->s_flex_groups[flex_group].free_clusters); 4808 &sbi->s_flex_groups[flex_group].free_clusters);
4816 } 4809 }
4817 4810
4818 ext4_mb_unload_buddy(&e4b); 4811 ext4_mb_unload_buddy(&e4b);
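
The yield()-to-cond_resched() conversions in this file all follow the same idiom: inside a retry loop, yield() gives no guarantee that anyone else actually runs and can soft-lock a non-preemptible kernel, while cond_resched() invokes the scheduler exactly when a reschedule is due. A kernel-context sketch, with make_progress() as a hypothetical stand-in for the surrounding logic:

    while (!make_progress()) {  /* make_progress() is hypothetical */
        cond_resched();         /* let others run, then retry */
    }
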
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 4e81d47aa8cb..33e1c086858b 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -32,16 +32,18 @@
32 */ 32 */
33static inline int 33static inline int
34get_ext_path(struct inode *inode, ext4_lblk_t lblock, 34get_ext_path(struct inode *inode, ext4_lblk_t lblock,
35 struct ext4_ext_path **path) 35 struct ext4_ext_path **orig_path)
36{ 36{
37 int ret = 0; 37 int ret = 0;
38 struct ext4_ext_path *path;
38 39
39 *path = ext4_ext_find_extent(inode, lblock, *path); 40 path = ext4_ext_find_extent(inode, lblock, *orig_path);
40 if (IS_ERR(*path)) { 41 if (IS_ERR(path))
41 ret = PTR_ERR(*path); 42 ret = PTR_ERR(path);
42 *path = NULL; 43 else if (path[ext_depth(inode)].p_ext == NULL)
43 } else if ((*path)[ext_depth(inode)].p_ext == NULL)
44 ret = -ENODATA; 44 ret = -ENODATA;
45 else
46 *orig_path = path;
45 47
46 return ret; 48 return ret;
47} 49}
@@ -611,24 +613,25 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
611{ 613{
612 struct ext4_ext_path *path = NULL; 614 struct ext4_ext_path *path = NULL;
613 struct ext4_extent *ext; 615 struct ext4_extent *ext;
616 int ret = 0;
614 ext4_lblk_t last = from + count; 617 ext4_lblk_t last = from + count;
615 while (from < last) { 618 while (from < last) {
616 *err = get_ext_path(inode, from, &path); 619 *err = get_ext_path(inode, from, &path);
617 if (*err) 620 if (*err)
618 return 0; 621 goto out;
619 ext = path[ext_depth(inode)].p_ext; 622 ext = path[ext_depth(inode)].p_ext;
620 if (!ext) { 623 if (uninit != ext4_ext_is_uninitialized(ext))
621 ext4_ext_drop_refs(path); 624 goto out;
622 return 0;
623 }
624 if (uninit != ext4_ext_is_uninitialized(ext)) {
625 ext4_ext_drop_refs(path);
626 return 0;
627 }
628 from += ext4_ext_get_actual_len(ext); 625 from += ext4_ext_get_actual_len(ext);
629 ext4_ext_drop_refs(path); 626 ext4_ext_drop_refs(path);
630 } 627 }
631 return 1; 628 ret = 1;
629out:
630 if (path) {
631 ext4_ext_drop_refs(path);
632 kfree(path);
633 }
634 return ret;
632} 635}
633 636
634/** 637/**
@@ -666,6 +669,14 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
666 int replaced_count = 0; 669 int replaced_count = 0;
667 int dext_alen; 670 int dext_alen;
668 671
672 *err = ext4_es_remove_extent(orig_inode, from, count);
673 if (*err)
674 goto out;
675
676 *err = ext4_es_remove_extent(donor_inode, from, count);
677 if (*err)
678 goto out;
679
669 /* Get the original extent for the block "orig_off" */ 680 /* Get the original extent for the block "orig_off" */
670 *err = get_ext_path(orig_inode, orig_off, &orig_path); 681 *err = get_ext_path(orig_inode, orig_off, &orig_path);
671 if (*err) 682 if (*err)
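
With the rewrite above, get_ext_path() updates the caller's pointer only on success, and mext_check_coverage() releases the path exactly once through a single exit. A kernel-context sketch of the resulting caller pattern (not standalone):

    struct ext4_ext_path *path = NULL;
    int err;

    err = get_ext_path(inode, lblock, &path);
    if (err)
        goto out;               /* path was left untouched */
    /* ... use path[ext_depth(inode)].p_ext ... */
    out:
    if (path) {
        ext4_ext_drop_refs(path);
        kfree(path);
    }
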
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 809b31003ecc..047a6de04a0a 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -50,11 +50,21 @@ void ext4_exit_pageio(void)
50 kmem_cache_destroy(io_page_cachep); 50 kmem_cache_destroy(io_page_cachep);
51} 51}
52 52
53void ext4_ioend_wait(struct inode *inode) 53/*
54 * This function is called by ext4_evict_inode() to make sure there is
55 * no more pending I/O completion work left to do.
56 */
57void ext4_ioend_shutdown(struct inode *inode)
54{ 58{
55 wait_queue_head_t *wq = ext4_ioend_wq(inode); 59 wait_queue_head_t *wq = ext4_ioend_wq(inode);
56 60
57 wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); 61 wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
62 /*
63 * We need to make sure the work structure is finished being
64 * used before we let the inode get destroyed.
65 */
66 if (work_pending(&EXT4_I(inode)->i_unwritten_work))
67 cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
58} 68}
59 69
60static void put_io_page(struct ext4_io_page *io_page) 70static void put_io_page(struct ext4_io_page *io_page)
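
ext4_ioend_shutdown() extends the old wait with cancel_work_sync(), so i_unwritten_work can be neither queued nor still running once the inode is destroyed; the work_pending() test is only a fast path, and cancel_work_sync() alone would be correct. A self-contained module sketch of the same teardown discipline (the demo names are assumptions):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static void demo_workfn(struct work_struct *work)
    {
        pr_info("demo work ran\n");
    }

    static DECLARE_WORK(demo_work, demo_workfn);

    static int __init demo_init(void)
    {
        schedule_work(&demo_work);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        /* guarantee the work item is neither queued nor running
         * before its owner goes away, as ext4_ioend_shutdown() does */
        cancel_work_sync(&demo_work);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
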
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index b2c8ee56eb98..c169477a62c9 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1360,8 +1360,8 @@ static void ext4_update_super(struct super_block *sb,
1360 sbi->s_log_groups_per_flex) { 1360 sbi->s_log_groups_per_flex) {
1361 ext4_group_t flex_group; 1361 ext4_group_t flex_group;
1362 flex_group = ext4_flex_group(sbi, group_data[0].group); 1362 flex_group = ext4_flex_group(sbi, group_data[0].group);
1363 atomic_add(EXT4_NUM_B2C(sbi, free_blocks), 1363 atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
1364 &sbi->s_flex_groups[flex_group].free_clusters); 1364 &sbi->s_flex_groups[flex_group].free_clusters);
1365 atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, 1365 atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
1366 &sbi->s_flex_groups[flex_group].free_inodes); 1366 &sbi->s_flex_groups[flex_group].free_inodes);
1367 } 1367 }
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b3818b48f418..5d6d53578124 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1927,8 +1927,8 @@ static int ext4_fill_flex_info(struct super_block *sb)
1927 flex_group = ext4_flex_group(sbi, i); 1927 flex_group = ext4_flex_group(sbi, i);
1928 atomic_add(ext4_free_inodes_count(sb, gdp), 1928 atomic_add(ext4_free_inodes_count(sb, gdp),
1929 &sbi->s_flex_groups[flex_group].free_inodes); 1929 &sbi->s_flex_groups[flex_group].free_inodes);
1930 atomic_add(ext4_free_group_clusters(sb, gdp), 1930 atomic64_add(ext4_free_group_clusters(sb, gdp),
1931 &sbi->s_flex_groups[flex_group].free_clusters); 1931 &sbi->s_flex_groups[flex_group].free_clusters);
1932 atomic_add(ext4_used_dirs_count(sb, gdp), 1932 atomic_add(ext4_used_dirs_count(sb, gdp),
1933 &sbi->s_flex_groups[flex_group].used_dirs); 1933 &sbi->s_flex_groups[flex_group].used_dirs);
1934 } 1934 }
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 21f46fb3a101..8067d3719e94 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -22,7 +22,6 @@
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/pagemap.h> 23#include <linux/pagemap.h>
24#include <linux/kthread.h> 24#include <linux/kthread.h>
25#include <linux/freezer.h>
26#include <linux/writeback.h> 25#include <linux/writeback.h>
27#include <linux/blkdev.h> 26#include <linux/blkdev.h>
28#include <linux/backing-dev.h> 27#include <linux/backing-dev.h>
@@ -88,20 +87,6 @@ static inline struct inode *wb_inode(struct list_head *head)
88#define CREATE_TRACE_POINTS 87#define CREATE_TRACE_POINTS
89#include <trace/events/writeback.h> 88#include <trace/events/writeback.h>
90 89
91/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
92static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
93{
94 if (bdi->wb.task) {
95 wake_up_process(bdi->wb.task);
96 } else {
97 /*
98 * The bdi thread isn't there, wake up the forker thread which
99 * will create and run it.
100 */
101 wake_up_process(default_backing_dev_info.wb.task);
102 }
103}
104
105static void bdi_queue_work(struct backing_dev_info *bdi, 90static void bdi_queue_work(struct backing_dev_info *bdi,
106 struct wb_writeback_work *work) 91 struct wb_writeback_work *work)
107{ 92{
@@ -109,10 +94,9 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
109 94
110 spin_lock_bh(&bdi->wb_lock); 95 spin_lock_bh(&bdi->wb_lock);
111 list_add_tail(&work->list, &bdi->work_list); 96 list_add_tail(&work->list, &bdi->work_list);
112 if (!bdi->wb.task)
113 trace_writeback_nothread(bdi, work);
114 bdi_wakeup_flusher(bdi);
115 spin_unlock_bh(&bdi->wb_lock); 97 spin_unlock_bh(&bdi->wb_lock);
98
99 mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
116} 100}
117 101
118static void 102static void
@@ -127,10 +111,8 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
127 */ 111 */
128 work = kzalloc(sizeof(*work), GFP_ATOMIC); 112 work = kzalloc(sizeof(*work), GFP_ATOMIC);
129 if (!work) { 113 if (!work) {
130 if (bdi->wb.task) { 114 trace_writeback_nowork(bdi);
131 trace_writeback_nowork(bdi); 115 mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
132 wake_up_process(bdi->wb.task);
133 }
134 return; 116 return;
135 } 117 }
136 118
@@ -177,9 +159,7 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi)
177 * writeback as soon as there is no other work to do. 159 * writeback as soon as there is no other work to do.
178 */ 160 */
179 trace_writeback_wake_background(bdi); 161 trace_writeback_wake_background(bdi);
180 spin_lock_bh(&bdi->wb_lock); 162 mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
181 bdi_wakeup_flusher(bdi);
182 spin_unlock_bh(&bdi->wb_lock);
183} 163}
184 164
185/* 165/*
@@ -1020,66 +1000,48 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
1020 1000
1021/* 1001/*
1022 * Handle writeback of dirty data for the device backed by this bdi. Also 1002 * Handle writeback of dirty data for the device backed by this bdi. Also
1023 * wakes up periodically and does kupdated style flushing. 1003 * reschedules periodically and does kupdated style flushing.
1024 */ 1004 */
1025int bdi_writeback_thread(void *data) 1005void bdi_writeback_workfn(struct work_struct *work)
1026{ 1006{
1027 struct bdi_writeback *wb = data; 1007 struct bdi_writeback *wb = container_of(to_delayed_work(work),
1008 struct bdi_writeback, dwork);
1028 struct backing_dev_info *bdi = wb->bdi; 1009 struct backing_dev_info *bdi = wb->bdi;
1029 long pages_written; 1010 long pages_written;
1030 1011
1031 current->flags |= PF_SWAPWRITE; 1012 current->flags |= PF_SWAPWRITE;
1032 set_freezable();
1033 wb->last_active = jiffies;
1034
1035 /*
1036 * Our parent may run at a different priority, just set us to normal
1037 */
1038 set_user_nice(current, 0);
1039
1040 trace_writeback_thread_start(bdi);
1041 1013
1042 while (!kthread_freezable_should_stop(NULL)) { 1014 if (likely(!current_is_workqueue_rescuer() ||
1015 list_empty(&bdi->bdi_list))) {
1043 /* 1016 /*
1044 * Remove own delayed wake-up timer, since we are already awake 1017 * The normal path. Keep writing back @bdi until its
1045 * and we'll take care of the periodic write-back. 1018 * work_list is empty. Note that this path is also taken
1019 * if @bdi is shutting down even when we're running off the
1020 * rescuer as work_list needs to be drained.
1046 */ 1021 */
1047 del_timer(&wb->wakeup_timer); 1022 do {
1048 1023 pages_written = wb_do_writeback(wb, 0);
1049 pages_written = wb_do_writeback(wb, 0); 1024 trace_writeback_pages_written(pages_written);
1050 1025 } while (!list_empty(&bdi->work_list));
1026 } else {
1027 /*
1028 * bdi_wq can't get enough workers and we're running off
1029 * the emergency worker. Don't hog it. Hopefully, 1024 is
1030 * enough for efficient IO.
1031 */
1032 pages_written = writeback_inodes_wb(&bdi->wb, 1024,
1033 WB_REASON_FORKER_THREAD);
1051 trace_writeback_pages_written(pages_written); 1034 trace_writeback_pages_written(pages_written);
1052
1053 if (pages_written)
1054 wb->last_active = jiffies;
1055
1056 set_current_state(TASK_INTERRUPTIBLE);
1057 if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
1058 __set_current_state(TASK_RUNNING);
1059 continue;
1060 }
1061
1062 if (wb_has_dirty_io(wb) && dirty_writeback_interval)
1063 schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
1064 else {
1065 /*
1066 * We have nothing to do, so can go sleep without any
1067 * timeout and save power. When a work is queued or
1068 * something is made dirty - we will be woken up.
1069 */
1070 schedule();
1071 }
1072 } 1035 }
1073 1036
1074 /* Flush any work that raced with us exiting */ 1037 if (!list_empty(&bdi->work_list) ||
1075 if (!list_empty(&bdi->work_list)) 1038 (wb_has_dirty_io(wb) && dirty_writeback_interval))
1076 wb_do_writeback(wb, 1); 1039 queue_delayed_work(bdi_wq, &wb->dwork,
1040 msecs_to_jiffies(dirty_writeback_interval * 10));
1077 1041
1078 trace_writeback_thread_stop(bdi); 1042 current->flags &= ~PF_SWAPWRITE;
1079 return 0;
1080} 1043}
1081 1044
1082
1083/* 1045/*
1084 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back 1046 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
1085 * the whole world. 1047 * the whole world.
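
The conversion above replaces the per-bdi flusher thread with a delayed_work on bdi_wq: the work function performs a round of writeback and re-arms itself while dirty data remains. A self-contained module sketch of that self-requeueing pattern (demo names and system_wq are assumptions; the real code uses bdi_wq):

    #include <linux/module.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static struct delayed_work demo_dwork;
    static int rounds = 3;  /* pretend there is dirty data for three rounds */

    static void demo_workfn(struct work_struct *work)
    {
        struct delayed_work *dwork = to_delayed_work(work);

        pr_info("demo writeback round, %d left\n", rounds);
        if (--rounds > 0)
            /* re-arm, like queue_delayed_work(bdi_wq, &wb->dwork, ...) */
            queue_delayed_work(system_wq, dwork, msecs_to_jiffies(500));
    }

    static int __init demo_init(void)
    {
        INIT_DELAYED_WORK(&demo_dwork, demo_workfn);
        /* kick it immediately, like mod_delayed_work(bdi_wq, &wb->dwork, 0) */
        mod_delayed_work(system_wq, &demo_dwork, 0);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        cancel_delayed_work_sync(&demo_dwork);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
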
diff --git a/fs/internal.h b/fs/internal.h
index 507141fceb99..4be78237d896 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -125,3 +125,8 @@ extern int invalidate_inodes(struct super_block *, bool);
125 * dcache.c 125 * dcache.c
126 */ 126 */
127extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); 127extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
128
129/*
130 * read_write.c
131 */
132extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index d6ee5aed56b1..325bc019ed88 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1065,9 +1065,12 @@ out:
1065void jbd2_journal_set_triggers(struct buffer_head *bh, 1065void jbd2_journal_set_triggers(struct buffer_head *bh,
1066 struct jbd2_buffer_trigger_type *type) 1066 struct jbd2_buffer_trigger_type *type)
1067{ 1067{
1068 struct journal_head *jh = bh2jh(bh); 1068 struct journal_head *jh = jbd2_journal_grab_journal_head(bh);
1069 1069
1070 if (WARN_ON(!jh))
1071 return;
1070 jh->b_triggers = type; 1072 jh->b_triggers = type;
1073 jbd2_journal_put_journal_head(jh);
1071} 1074}
1072 1075
1073void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, 1076void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
@@ -1119,17 +1122,18 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1119{ 1122{
1120 transaction_t *transaction = handle->h_transaction; 1123 transaction_t *transaction = handle->h_transaction;
1121 journal_t *journal = transaction->t_journal; 1124 journal_t *journal = transaction->t_journal;
1122 struct journal_head *jh = bh2jh(bh); 1125 struct journal_head *jh;
1123 int ret = 0; 1126 int ret = 0;
1124 1127
1125 jbd_debug(5, "journal_head %p\n", jh);
1126 JBUFFER_TRACE(jh, "entry");
1127 if (is_handle_aborted(handle)) 1128 if (is_handle_aborted(handle))
1128 goto out; 1129 goto out;
1129 if (!buffer_jbd(bh)) { 1130 jh = jbd2_journal_grab_journal_head(bh);
1131 if (!jh) {
1130 ret = -EUCLEAN; 1132 ret = -EUCLEAN;
1131 goto out; 1133 goto out;
1132 } 1134 }
1135 jbd_debug(5, "journal_head %p\n", jh);
1136 JBUFFER_TRACE(jh, "entry");
1133 1137
1134 jbd_lock_bh_state(bh); 1138 jbd_lock_bh_state(bh);
1135 1139
@@ -1220,6 +1224,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1220 spin_unlock(&journal->j_list_lock); 1224 spin_unlock(&journal->j_list_lock);
1221out_unlock_bh: 1225out_unlock_bh:
1222 jbd_unlock_bh_state(bh); 1226 jbd_unlock_bh_state(bh);
1227 jbd2_journal_put_journal_head(jh);
1223out: 1228out:
1224 JBUFFER_TRACE(jh, "exit"); 1229 JBUFFER_TRACE(jh, "exit");
1225 WARN_ON(ret); /* All errors are bugs, so dump the stack */ 1230 WARN_ON(ret); /* All errors are bugs, so dump the stack */
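
Both jbd2 hunks replace a bare bh2jh() dereference with jbd2_journal_grab_journal_head(), which returns NULL when the buffer is no longer journaled and otherwise pins the journal_head until the matching put. A kernel-context sketch of the pattern (not standalone):

    struct journal_head *jh = jbd2_journal_grab_journal_head(bh);

    if (!jh)
        return -EUCLEAN;  /* buffer no longer has a journal_head */
    /* ... jh cannot be freed while this reference is held ... */
    jbd2_journal_put_journal_head(jh);
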
diff --git a/fs/namespace.c b/fs/namespace.c
index 50ca17d3cb45..d581e45c0a9f 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -798,6 +798,10 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
798 } 798 }
799 799
800 mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD; 800 mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
801 /* Don't allow unprivileged users to change mount flags */
802 if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
803 mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
804
801 atomic_inc(&sb->s_active); 805 atomic_inc(&sb->s_active);
802 mnt->mnt.mnt_sb = sb; 806 mnt->mnt.mnt_sb = sb;
803 mnt->mnt.mnt_root = dget(root); 807 mnt->mnt.mnt_root = dget(root);
@@ -1713,6 +1717,9 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
1713 if (readonly_request == __mnt_is_readonly(mnt)) 1717 if (readonly_request == __mnt_is_readonly(mnt))
1714 return 0; 1718 return 0;
1715 1719
1720 if (mnt->mnt_flags & MNT_LOCK_READONLY)
1721 return -EPERM;
1722
1716 if (readonly_request) 1723 if (readonly_request)
1717 error = mnt_make_readonly(real_mount(mnt)); 1724 error = mnt_make_readonly(real_mount(mnt));
1718 else 1725 else
@@ -2339,7 +2346,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
2339 /* First pass: copy the tree topology */ 2346 /* First pass: copy the tree topology */
2340 copy_flags = CL_COPY_ALL | CL_EXPIRE; 2347 copy_flags = CL_COPY_ALL | CL_EXPIRE;
2341 if (user_ns != mnt_ns->user_ns) 2348 if (user_ns != mnt_ns->user_ns)
2342 copy_flags |= CL_SHARED_TO_SLAVE; 2349 copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
2343 new = copy_tree(old, old->mnt.mnt_root, copy_flags); 2350 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
2344 if (IS_ERR(new)) { 2351 if (IS_ERR(new)) {
2345 up_write(&namespace_sem); 2352 up_write(&namespace_sem);
@@ -2732,6 +2739,51 @@ bool our_mnt(struct vfsmount *mnt)
2732 return check_mnt(real_mount(mnt)); 2739 return check_mnt(real_mount(mnt));
2733} 2740}
2734 2741
2742bool current_chrooted(void)
2743{
2744 /* Does the current process have a non-standard root */
2745 struct path ns_root;
2746 struct path fs_root;
2747 bool chrooted;
2748
2749 /* Find the namespace root */
2750 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
2751 ns_root.dentry = ns_root.mnt->mnt_root;
2752 path_get(&ns_root);
2753 while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
2754 ;
2755
2756 get_fs_root(current->fs, &fs_root);
2757
2758 chrooted = !path_equal(&fs_root, &ns_root);
2759
2760 path_put(&fs_root);
2761 path_put(&ns_root);
2762
2763 return chrooted;
2764}
2765
2766void update_mnt_policy(struct user_namespace *userns)
2767{
2768 struct mnt_namespace *ns = current->nsproxy->mnt_ns;
2769 struct mount *mnt;
2770
2771 down_read(&namespace_sem);
2772 list_for_each_entry(mnt, &ns->list, mnt_list) {
2773 switch (mnt->mnt.mnt_sb->s_magic) {
2774 case SYSFS_MAGIC:
2775 userns->may_mount_sysfs = true;
2776 break;
2777 case PROC_SUPER_MAGIC:
2778 userns->may_mount_proc = true;
2779 break;
2780 }
2781 if (userns->may_mount_sysfs && userns->may_mount_proc)
2782 break;
2783 }
2784 up_read(&namespace_sem);
2785}
2786
2735static void *mntns_get(struct task_struct *task) 2787static void *mntns_get(struct task_struct *task)
2736{ 2788{
2737 struct mnt_namespace *ns = NULL; 2789 struct mnt_namespace *ns = NULL;
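
The MNT_LOCK_READONLY changes above mark read-only mounts copied into an unprivileged namespace as locked, so a later remount cannot clear the read-only bit. A user-space model of the flag logic (the flag values are made up for the demo; the kernel returns -EPERM):

    #include <stdio.h>

    #define MNT_READONLY      0x1  /* demo values, not the kernel's */
    #define MNT_LOCK_READONLY 0x2

    static int change_ro_flag(unsigned int *flags, int want_readonly)
    {
        if (!want_readonly && (*flags & MNT_LOCK_READONLY))
            return -1;  /* -EPERM in the kernel */
        if (want_readonly)
            *flags |= MNT_READONLY;
        else
            *flags &= ~MNT_READONLY;
        return 0;
    }

    int main(void)
    {
        unsigned int flags = MNT_READONLY | MNT_LOCK_READONLY;

        printf("remount rw -> %d\n", change_ro_flag(&flags, 0)); /* -1 */
        return 0;
    }
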
diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c
index 737d839bc17b..6fc7b5cae92b 100644
--- a/fs/nfs/blocklayout/blocklayoutdm.c
+++ b/fs/nfs/blocklayout/blocklayoutdm.c
@@ -55,7 +55,8 @@ static void dev_remove(struct net *net, dev_t dev)
55 55
56 bl_pipe_msg.bl_wq = &nn->bl_wq; 56 bl_pipe_msg.bl_wq = &nn->bl_wq;
57 memset(msg, 0, sizeof(*msg)); 57 memset(msg, 0, sizeof(*msg));
58 msg->data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS); 58 msg->len = sizeof(bl_msg) + bl_msg.totallen;
59 msg->data = kzalloc(msg->len, GFP_NOFS);
59 if (!msg->data) 60 if (!msg->data)
60 goto out; 61 goto out;
61 62
@@ -66,7 +67,6 @@ static void dev_remove(struct net *net, dev_t dev)
66 memcpy(msg->data, &bl_msg, sizeof(bl_msg)); 67 memcpy(msg->data, &bl_msg, sizeof(bl_msg));
67 dataptr = (uint8_t *) msg->data; 68 dataptr = (uint8_t *) msg->data;
68 memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request)); 69 memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request));
69 msg->len = sizeof(bl_msg) + bl_msg.totallen;
70 70
71 add_wait_queue(&nn->bl_wq, &wq); 71 add_wait_queue(&nn->bl_wq, &wq);
72 if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) { 72 if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) {
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index dc0f98dfa717..c516da5873fd 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -726,9 +726,9 @@ out1:
726 return ret; 726 return ret;
727} 727}
728 728
729static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data) 729static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data, size_t datalen)
730{ 730{
731 return key_instantiate_and_link(key, data, strlen(data) + 1, 731 return key_instantiate_and_link(key, data, datalen,
732 id_resolver_cache->thread_keyring, 732 id_resolver_cache->thread_keyring,
733 authkey); 733 authkey);
734} 734}
@@ -738,6 +738,7 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
738 struct key *key, struct key *authkey) 738 struct key *key, struct key *authkey)
739{ 739{
740 char id_str[NFS_UINT_MAXLEN]; 740 char id_str[NFS_UINT_MAXLEN];
741 size_t len;
741 int ret = -ENOKEY; 742 int ret = -ENOKEY;
742 743
743 /* ret = -ENOKEY */ 744 /* ret = -ENOKEY */
@@ -747,13 +748,15 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
747 case IDMAP_CONV_NAMETOID: 748 case IDMAP_CONV_NAMETOID:
748 if (strcmp(upcall->im_name, im->im_name) != 0) 749 if (strcmp(upcall->im_name, im->im_name) != 0)
749 break; 750 break;
750 sprintf(id_str, "%d", im->im_id); 751 /* Note: here we store the NUL terminator too */
751 ret = nfs_idmap_instantiate(key, authkey, id_str); 752 len = sprintf(id_str, "%d", im->im_id) + 1;
753 ret = nfs_idmap_instantiate(key, authkey, id_str, len);
752 break; 754 break;
753 case IDMAP_CONV_IDTONAME: 755 case IDMAP_CONV_IDTONAME:
754 if (upcall->im_id != im->im_id) 756 if (upcall->im_id != im->im_id)
755 break; 757 break;
756 ret = nfs_idmap_instantiate(key, authkey, im->im_name); 758 len = strlen(im->im_name);
759 ret = nfs_idmap_instantiate(key, authkey, im->im_name, len);
757 break; 760 break;
758 default: 761 default:
759 ret = -EINVAL; 762 ret = -EINVAL;
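
The idmap fix above passes an explicit payload length to key_instantiate_and_link(): in the NAMETOID case the NUL terminator is stored too, so the length is sprintf()'s return value plus one. A user-space illustration of the length rule:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char id_str[16];
        /* sprintf() returns the character count without the NUL */
        size_t len = sprintf(id_str, "%d", 1000) + 1;  /* "1000\0" -> 5 */

        printf("payload bytes: %zu (strlen %zu + NUL)\n",
               len, strlen(id_str));
        return 0;
    }
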
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 49eeb044c109..4fb234d3aefb 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -129,7 +129,6 @@ static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo)
129{ 129{
130 if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) 130 if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
131 return; 131 return;
132 clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags);
133 pnfs_return_layout(inode); 132 pnfs_return_layout(inode);
134} 133}
135 134
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b2671cb0f901..26431cf62ddb 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2632,7 +2632,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2632 int status; 2632 int status;
2633 2633
2634 if (pnfs_ld_layoutret_on_setattr(inode)) 2634 if (pnfs_ld_layoutret_on_setattr(inode))
2635 pnfs_return_layout(inode); 2635 pnfs_commit_and_return_layout(inode);
2636 2636
2637 nfs_fattr_init(fattr); 2637 nfs_fattr_init(fattr);
2638 2638
@@ -6416,22 +6416,8 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
6416static void nfs4_layoutcommit_release(void *calldata) 6416static void nfs4_layoutcommit_release(void *calldata)
6417{ 6417{
6418 struct nfs4_layoutcommit_data *data = calldata; 6418 struct nfs4_layoutcommit_data *data = calldata;
6419 struct pnfs_layout_segment *lseg, *tmp;
6420 unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
6421 6419
6422 pnfs_cleanup_layoutcommit(data); 6420 pnfs_cleanup_layoutcommit(data);
6423 /* Matched by references in pnfs_set_layoutcommit */
6424 list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
6425 list_del_init(&lseg->pls_lc_list);
6426 if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
6427 &lseg->pls_flags))
6428 pnfs_put_lseg(lseg);
6429 }
6430
6431 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
6432 smp_mb__after_clear_bit();
6433 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
6434
6435 put_rpccred(data->cred); 6421 put_rpccred(data->cred);
6436 kfree(data); 6422 kfree(data);
6437} 6423}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 48ac5aad6258..4bdffe0ba025 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -417,6 +417,16 @@ should_free_lseg(struct pnfs_layout_range *lseg_range,
417 lo_seg_intersecting(lseg_range, recall_range); 417 lo_seg_intersecting(lseg_range, recall_range);
418} 418}
419 419
420static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
421 struct list_head *tmp_list)
422{
423 if (!atomic_dec_and_test(&lseg->pls_refcount))
424 return false;
425 pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
426 list_add(&lseg->pls_list, tmp_list);
427 return true;
428}
429
420/* Returns 1 if lseg is removed from list, 0 otherwise */ 430/* Returns 1 if lseg is removed from list, 0 otherwise */
421static int mark_lseg_invalid(struct pnfs_layout_segment *lseg, 431static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
422 struct list_head *tmp_list) 432 struct list_head *tmp_list)
@@ -430,11 +440,8 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
430 */ 440 */
431 dprintk("%s: lseg %p ref %d\n", __func__, lseg, 441 dprintk("%s: lseg %p ref %d\n", __func__, lseg,
432 atomic_read(&lseg->pls_refcount)); 442 atomic_read(&lseg->pls_refcount));
433 if (atomic_dec_and_test(&lseg->pls_refcount)) { 443 if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
434 pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
435 list_add(&lseg->pls_list, tmp_list);
436 rv = 1; 444 rv = 1;
437 }
438 } 445 }
439 return rv; 446 return rv;
440} 447}
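
pnfs_lseg_dec_and_remove_zero() above factors out the drop-one-reference-and-test step so that pnfs_clear_layoutcommit() below can reuse it. A user-space analogue of the dec-and-test ownership rule:

    #include <stdatomic.h>
    #include <stdio.h>

    /* returns 1 when this caller dropped the last reference and now
     * owns the object, i.e. must move it to the free list */
    static int dec_and_test_owner(atomic_int *refs)
    {
        return atomic_fetch_sub(refs, 1) == 1;
    }

    int main(void)
    {
        atomic_int refs = 2;

        printf("%d\n", dec_and_test_owner(&refs)); /* 0: still shared */
        printf("%d\n", dec_and_test_owner(&refs)); /* 1: caller frees */
        return 0;
    }
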
@@ -777,6 +784,21 @@ send_layoutget(struct pnfs_layout_hdr *lo,
777 return lseg; 784 return lseg;
778} 785}
779 786
787static void pnfs_clear_layoutcommit(struct inode *inode,
788 struct list_head *head)
789{
790 struct nfs_inode *nfsi = NFS_I(inode);
791 struct pnfs_layout_segment *lseg, *tmp;
792
793 if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
794 return;
795 list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
796 if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
797 continue;
798 pnfs_lseg_dec_and_remove_zero(lseg, head);
799 }
800}
801
780/* 802/*
781 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr 803 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
782 * when the layout segment list is empty. 804 * when the layout segment list is empty.
@@ -808,6 +830,7 @@ _pnfs_return_layout(struct inode *ino)
808 /* Reference matched in nfs4_layoutreturn_release */ 830 /* Reference matched in nfs4_layoutreturn_release */
809 pnfs_get_layout_hdr(lo); 831 pnfs_get_layout_hdr(lo);
810 empty = list_empty(&lo->plh_segs); 832 empty = list_empty(&lo->plh_segs);
833 pnfs_clear_layoutcommit(ino, &tmp_list);
811 pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL); 834 pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
812 /* Don't send a LAYOUTRETURN if list was initially empty */ 835 /* Don't send a LAYOUTRETURN if list was initially empty */
813 if (empty) { 836 if (empty) {
@@ -820,8 +843,6 @@ _pnfs_return_layout(struct inode *ino)
820 spin_unlock(&ino->i_lock); 843 spin_unlock(&ino->i_lock);
821 pnfs_free_lseg_list(&tmp_list); 844 pnfs_free_lseg_list(&tmp_list);
822 845
823 WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));
824
825 lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); 846 lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
826 if (unlikely(lrp == NULL)) { 847 if (unlikely(lrp == NULL)) {
827 status = -ENOMEM; 848 status = -ENOMEM;
@@ -845,6 +866,33 @@ out:
845} 866}
846EXPORT_SYMBOL_GPL(_pnfs_return_layout); 867EXPORT_SYMBOL_GPL(_pnfs_return_layout);
847 868
869int
870pnfs_commit_and_return_layout(struct inode *inode)
871{
872 struct pnfs_layout_hdr *lo;
873 int ret;
874
875 spin_lock(&inode->i_lock);
876 lo = NFS_I(inode)->layout;
877 if (lo == NULL) {
878 spin_unlock(&inode->i_lock);
879 return 0;
880 }
881 pnfs_get_layout_hdr(lo);
882 /* Block new layoutgets and read/write to ds */
883 lo->plh_block_lgets++;
884 spin_unlock(&inode->i_lock);
885 filemap_fdatawait(inode->i_mapping);
886 ret = pnfs_layoutcommit_inode(inode, true);
887 if (ret == 0)
888 ret = _pnfs_return_layout(inode);
889 spin_lock(&inode->i_lock);
890 lo->plh_block_lgets--;
891 spin_unlock(&inode->i_lock);
892 pnfs_put_layout_hdr(lo);
893 return ret;
894}
895
848bool pnfs_roc(struct inode *ino) 896bool pnfs_roc(struct inode *ino)
849{ 897{
850 struct pnfs_layout_hdr *lo; 898 struct pnfs_layout_hdr *lo;
@@ -1458,7 +1506,6 @@ static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
1458 dprintk("pnfs write error = %d\n", hdr->pnfs_error); 1506 dprintk("pnfs write error = %d\n", hdr->pnfs_error);
1459 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & 1507 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1460 PNFS_LAYOUTRET_ON_ERROR) { 1508 PNFS_LAYOUTRET_ON_ERROR) {
1461 clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
1462 pnfs_return_layout(hdr->inode); 1509 pnfs_return_layout(hdr->inode);
1463 } 1510 }
1464 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) 1511 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
@@ -1613,7 +1660,6 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
1613 dprintk("pnfs read error = %d\n", hdr->pnfs_error); 1660 dprintk("pnfs read error = %d\n", hdr->pnfs_error);
1614 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & 1661 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1615 PNFS_LAYOUTRET_ON_ERROR) { 1662 PNFS_LAYOUTRET_ON_ERROR) {
1616 clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
1617 pnfs_return_layout(hdr->inode); 1663 pnfs_return_layout(hdr->inode);
1618 } 1664 }
1619 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) 1665 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
@@ -1746,11 +1792,27 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
1746 1792
1747 list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) { 1793 list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
1748 if (lseg->pls_range.iomode == IOMODE_RW && 1794 if (lseg->pls_range.iomode == IOMODE_RW &&
1749 test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) 1795 test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
1750 list_add(&lseg->pls_lc_list, listp); 1796 list_add(&lseg->pls_lc_list, listp);
1751 } 1797 }
1752} 1798}
1753 1799
1800static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
1801{
1802 struct pnfs_layout_segment *lseg, *tmp;
1803 unsigned long *bitlock = &NFS_I(inode)->flags;
1804
1805 /* Matched by references in pnfs_set_layoutcommit */
1806 list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
1807 list_del_init(&lseg->pls_lc_list);
1808 pnfs_put_lseg(lseg);
1809 }
1810
1811 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
1812 smp_mb__after_clear_bit();
1813 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
1814}
1815
1754void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg) 1816void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
1755{ 1817{
1756 pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode); 1818 pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
@@ -1795,6 +1857,7 @@ void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
1795 1857
1796 if (nfss->pnfs_curr_ld->cleanup_layoutcommit) 1858 if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
1797 nfss->pnfs_curr_ld->cleanup_layoutcommit(data); 1859 nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
1860 pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
1798} 1861}
1799 1862
1800/* 1863/*
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 94ba80417748..f5f8a470a647 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -219,6 +219,7 @@ void pnfs_set_layoutcommit(struct nfs_write_data *wdata);
 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
 int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
 int _pnfs_return_layout(struct inode *);
+int pnfs_commit_and_return_layout(struct inode *);
 void pnfs_ld_write_done(struct nfs_write_data *);
 void pnfs_ld_read_done(struct nfs_read_data *);
 struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
@@ -407,6 +408,11 @@ static inline int pnfs_return_layout(struct inode *ino)
 	return 0;
 }
 
+static inline int pnfs_commit_and_return_layout(struct inode *inode)
+{
+	return 0;
+}
+
 static inline bool
 pnfs_ld_layoutret_on_setattr(struct inode *inode)
 {
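
The blocking behaviour of the new pnfs_commit_and_return_layout() depends on NFS_INO_LAYOUTCOMMITTING being released and woken in pnfs_list_write_lseg_done() above. A rough sketch of the waiter side of that handshake, assuming the usual wait_on_bit_lock() idiom (not taken verbatim from this patch):

	/* Sketch only: take the LAYOUTCOMMITTING bit, sleeping until the
	 * clear_bit_unlock()/wake_up_bit() pair in pnfs_list_write_lseg_done()
	 * releases it. */
	static int layoutcommit_begin(struct nfs_inode *nfsi)
	{
		return wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
					nfs_wait_bit_killable, TASK_KILLABLE);
	}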
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 62c1ee128aeb..ca05f6dc3544 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -102,7 +102,8 @@ nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
 {
 	if (rp->c_type == RC_REPLBUFF)
 		kfree(rp->c_replvec.iov_base);
-	hlist_del(&rp->c_hash);
+	if (!hlist_unhashed(&rp->c_hash))
+		hlist_del(&rp->c_hash);
 	list_del(&rp->c_lru);
 	--num_drc_entries;
 	kmem_cache_free(drc_slab, rp);
@@ -118,6 +119,10 @@ nfsd_reply_cache_free(struct svc_cacherep *rp)
 
 int nfsd_reply_cache_init(void)
 {
+	INIT_LIST_HEAD(&lru_head);
+	max_drc_entries = nfsd_cache_size_limit();
+	num_drc_entries = 0;
+
 	register_shrinker(&nfsd_reply_cache_shrinker);
 	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
 					0, 0, NULL);
@@ -128,10 +133,6 @@ int nfsd_reply_cache_init(void)
 	if (!cache_hash)
 		goto out_nomem;
 
-	INIT_LIST_HEAD(&lru_head);
-	max_drc_entries = nfsd_cache_size_limit();
-	num_drc_entries = 0;
-
 	return 0;
 out_nomem:
 	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
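
The hlist_unhashed() check matters because the free path can now see an entry that was never inserted into the hash table, and hlist_del() assumes the node is linked. The idiom in isolation (illustrative sketch, not from the patch):

	#include <linux/list.h>

	/* hlist_unhashed() tests pprev == NULL, i.e. "never hashed", so this
	 * is safe for both hashed and freshly allocated entries. */
	static void safe_unhash(struct hlist_node *n)
	{
		if (!hlist_unhashed(n))
			hlist_del(n);
	}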
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 2a7eb536de0b..2b2e2396a869 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1013,6 +1013,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 	int			host_err;
 	int			stable = *stablep;
 	int			use_wgather;
+	loff_t			pos = offset;
 
 	dentry = file->f_path.dentry;
 	inode = dentry->d_inode;
@@ -1025,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 
 	/* Write the data. */
 	oldfs = get_fs(); set_fs(KERNEL_DS);
-	host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
+	host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
 	set_fs(oldfs);
 	if (host_err < 0)
 		goto out_nfserr;
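
The loff_t scratch copy is the whole fix: vfs_writev() advances the position it is handed, so writing through &offset silently changed the value the rest of nfsd_vfs_write() keeps using. The difference in miniature (illustrative values):

	loff_t offset = 4096;	/* client-supplied write offset */
	loff_t pos = offset;

	vfs_writev(file, vec, vlen, &pos);	/* pos advances past the data */
	/* offset is still 4096 for the bookkeeping that follows the write */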
diff --git a/fs/pnode.c b/fs/pnode.c
index 3e000a51ac0d..8b29d2164da6 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -9,6 +9,7 @@
 #include <linux/mnt_namespace.h>
 #include <linux/mount.h>
 #include <linux/fs.h>
+#include <linux/nsproxy.h>
 #include "internal.h"
 #include "pnode.h"
 
@@ -220,6 +221,7 @@ static struct mount *get_source(struct mount *dest,
 int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
 		    struct mount *source_mnt, struct list_head *tree_list)
 {
+	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
 	struct mount *m, *child;
 	int ret = 0;
 	struct mount *prev_dest_mnt = dest_mnt;
@@ -237,6 +239,10 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
 
 		source = get_source(m, prev_dest_mnt, prev_src_mnt, &type);
 
+		/* Notice when we are propagating across user namespaces */
+		if (m->mnt_ns->user_ns != user_ns)
+			type |= CL_UNPRIVILEGED;
+
 		child = copy_tree(source, source->mnt.mnt_root, type);
 		if (IS_ERR(child)) {
 			ret = PTR_ERR(child);
diff --git a/fs/pnode.h b/fs/pnode.h
index 19b853a3445c..a0493d5ebfbf 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -23,6 +23,7 @@
 #define CL_MAKE_SHARED		0x08
 #define CL_PRIVATE		0x10
 #define CL_SHARED_TO_SLAVE	0x20
+#define CL_UNPRIVILEGED		0x40
 
 static inline void set_mnt_shared(struct mount *mnt)
 {
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index a86aebc9ba7c..869116c2afbe 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -446,9 +446,10 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
 
 struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 {
-	struct inode *inode = iget_locked(sb, de->low_ino);
+	struct inode *inode = new_inode_pseudo(sb);
 
-	if (inode && (inode->i_state & I_NEW)) {
+	if (inode) {
+		inode->i_ino = de->low_ino;
 		inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 		PROC_I(inode)->pde = de;
 
@@ -476,7 +477,6 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 				inode->i_fop = de->proc_fops;
 			}
 		}
-		unlock_new_inode(inode);
 	} else
 	       pde_put(de);
 	return inode;
diff --git a/fs/proc/root.c b/fs/proc/root.c
index c6e9fac26bac..9c7fab1d23f0 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -16,6 +16,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/bitops.h>
+#include <linux/user_namespace.h>
 #include <linux/mount.h>
 #include <linux/pid_namespace.h>
 #include <linux/parser.h>
@@ -108,6 +109,9 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
 	} else {
 		ns = task_active_pid_ns(current);
 		options = data;
+
+		if (!current_user_ns()->may_mount_proc)
+			return ERR_PTR(-EPERM);
 	}
 
 	sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns);
diff --git a/fs/read_write.c b/fs/read_write.c
index a698eff457fb..e6ddc8dceb96 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -17,6 +17,7 @@
 #include <linux/splice.h>
 #include <linux/compat.h>
 #include "read_write.h"
+#include "internal.h"
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -417,6 +418,33 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof
 
 EXPORT_SYMBOL(do_sync_write);
 
+ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
+{
+	mm_segment_t old_fs;
+	const char __user *p;
+	ssize_t ret;
+
+	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+		return -EINVAL;
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	p = (__force const char __user *)buf;
+	if (count > MAX_RW_COUNT)
+		count = MAX_RW_COUNT;
+	if (file->f_op->write)
+		ret = file->f_op->write(file, p, count, pos);
+	else
+		ret = do_sync_write(file, p, count, pos);
+	set_fs(old_fs);
+	if (ret > 0) {
+		fsnotify_modify(file);
+		add_wchar(current, ret);
+	}
+	inc_syscw(current);
+	return ret;
+}
+
 ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
 {
 	ssize_t ret;
diff --git a/fs/splice.c b/fs/splice.c
index 718bd0056384..29e394e49ddd 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -31,6 +31,7 @@
 #include <linux/security.h>
 #include <linux/gfp.h>
 #include <linux/socket.h>
+#include "internal.h"
 
 /*
  * Attempt to steal a page from a pipe buffer. This should perhaps go into
@@ -1048,9 +1049,10 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 {
 	int ret;
 	void *data;
+	loff_t tmp = sd->pos;
 
 	data = buf->ops->map(pipe, buf, 0);
-	ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos);
+	ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
 	buf->ops->unmap(pipe, buf, data);
 
 	return ret;
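
__kernel_write() is essentially the body of vfs_write() without the permission and range checks, for callers such as splice that have already validated the I/O. A hedged usage sketch (the helper name is illustrative):

	/* Write a kernel buffer at an explicit offset; assumes the caller
	 * already performed rw_verify_area()-style validation, since
	 * __kernel_write() repeats none of it. */
	static ssize_t write_kbuf_at(struct file *file, const char *buf,
				     size_t len, loff_t off)
	{
		loff_t pos = off;

		return __kernel_write(file, buf, len, &pos);
	}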
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 2fbdff6be25c..e14512678c9b 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -1020,6 +1020,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
 		ino = parent_sd->s_ino;
 		if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0)
 			filp->f_pos++;
+		else
+			return 0;
 	}
 	if (filp->f_pos == 1) {
 		if (parent_sd->s_parent)
@@ -1028,6 +1030,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
 			ino = parent_sd->s_ino;
 		if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0)
 			filp->f_pos++;
+		else
+			return 0;
 	}
 	mutex_lock(&sysfs_mutex);
 	for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
@@ -1058,10 +1062,21 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
 	return 0;
 }
 
+static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+	struct inode *inode = file_inode(file);
+	loff_t ret;
+
+	mutex_lock(&inode->i_mutex);
+	ret = generic_file_llseek(file, offset, whence);
+	mutex_unlock(&inode->i_mutex);
+
+	return ret;
+}
 
 const struct file_operations sysfs_dir_operations = {
 	.read		= generic_read_dir,
 	.readdir	= sysfs_readdir,
 	.release	= sysfs_dir_release,
-	.llseek		= generic_file_llseek,
+	.llseek		= sysfs_dir_llseek,
 };
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 8d924b5ec733..afd83273e6ce 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/magic.h>
 #include <linux/slab.h>
+#include <linux/user_namespace.h>
 
 #include "sysfs.h"
 
@@ -111,6 +112,9 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
 	struct super_block *sb;
 	int error;
 
+	if (!(flags & MS_KERNMOUNT) && !current_user_ns()->may_mount_sysfs)
+		return ERR_PTR(-EPERM);
+
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return ERR_PTR(-ENOMEM);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 4e8f0df82d02..8459b5d8cb71 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1334,6 +1334,12 @@ _xfs_buf_ioapply(
 	int		size;
 	int		i;
 
+	/*
+	 * Make sure we capture only current IO errors rather than stale errors
+	 * left over from previous use of the buffer (e.g. failed readahead).
+	 */
+	bp->b_error = 0;
+
 	if (bp->b_flags & XBF_WRITE) {
 		if (bp->b_flags & XBF_SYNCIO)
 			rw = WRITE_SYNC;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 912d83d8860a..5a30dd899d2b 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -325,7 +325,7 @@ xfs_iomap_eof_want_preallocate(
  * rather than falling short due to things like stripe unit/width alignment of
  * real extents.
  */
-STATIC int
+STATIC xfs_fsblock_t
 xfs_iomap_eof_prealloc_initial_size(
 	struct xfs_mount	*mp,
 	struct xfs_inode	*ip,
@@ -413,7 +413,7 @@ xfs_iomap_prealloc_size(
 		 * have a large file on a small filesystem and the above
 		 * lowspace thresholds are smaller than MAXEXTLEN.
 		 */
-		while (alloc_blocks >= freesp)
+		while (alloc_blocks && alloc_blocks >= freesp)
 			alloc_blocks >>= 4;
 	}
 
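
The extra "alloc_blocks &&" term is a termination guard: alloc_blocks >>= 4 eventually reaches 0, and with freesp == 0 the old condition 0 >= 0 stays true forever. The bug in miniature, as a userspace sketch:

	#include <assert.h>

	static unsigned long long throttle(unsigned long long alloc_blocks,
					   unsigned long long freesp)
	{
		/* the old code lacked the first test and spun when freesp == 0 */
		while (alloc_blocks && alloc_blocks >= freesp)
			alloc_blocks >>= 4;
		return alloc_blocks;
	}

	int main(void)
	{
		assert(throttle(1ULL << 20, 0) == 0);	/* now terminates */
		return 0;
	}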
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index a386b0b654cc..918e8fe2f5e9 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -581,7 +581,11 @@
 	{0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
-	{0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x990B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x990C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x990D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x990E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
@@ -592,6 +596,13 @@
 	{0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9996, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9998, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 350459910fe1..c3881553f7d1 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -18,6 +18,7 @@
 #include <linux/writeback.h>
 #include <linux/atomic.h>
 #include <linux/sysctl.h>
+#include <linux/workqueue.h>
 
 struct page;
 struct device;
@@ -27,7 +28,6 @@ struct dentry;
  * Bits in backing_dev_info.state
  */
 enum bdi_state {
-	BDI_pending,		/* On its way to being activated */
 	BDI_wb_alloc,		/* Default embedded wb allocated */
 	BDI_async_congested,	/* The async (write) queue is getting full */
 	BDI_sync_congested,	/* The sync queue is getting full */
@@ -53,10 +53,8 @@ struct bdi_writeback {
 	unsigned int nr;
 
 	unsigned long last_old_flush;	/* last old data flush */
-	unsigned long last_active;	/* last time bdi thread was active */
 
-	struct task_struct *task;	/* writeback thread */
-	struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */
+	struct delayed_work dwork;	/* work item used for writeback */
 	struct list_head b_dirty;	/* dirty inodes */
 	struct list_head b_io;		/* parked for writeback */
 	struct list_head b_more_io;	/* parked for more writeback */
@@ -123,14 +121,15 @@ int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 			enum wb_reason reason);
 void bdi_start_background_writeback(struct backing_dev_info *bdi);
-int bdi_writeback_thread(void *data);
+void bdi_writeback_workfn(struct work_struct *work);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
 void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
 void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
 
 extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;
-extern struct list_head bdi_pending_list;
+
+extern struct workqueue_struct *bdi_wq;
 
 static inline int wb_has_dirty_io(struct bdi_writeback *wb)
 {
@@ -336,11 +335,6 @@ static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
 	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
 }
 
-static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
-{
-	return bdi == &default_backing_dev_info;
-}
-
 static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
 {
 	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
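
These declarations are the visible edge of the writeback-workqueue conversion described in the merge message: each bdi's dedicated flusher thread becomes a delayed_work serviced by bdi_wq. A sketch of how writeback might be kicked under the new model (the helper name and timeout are illustrative, not from this header):

	/* Re-arm writeback as delayed work on bdi_wq instead of waking a
	 * per-bdi flusher thread. */
	static void kick_writeback(struct backing_dev_info *bdi,
				   unsigned long timeout)
	{
		mod_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
	}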
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 032560295fcb..d08e4d2a9b92 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -591,6 +591,21 @@ static inline int cpulist_scnprintf(char *buf, int len,
 }
 
 /**
+ * cpumask_parse - extract a cpumask from a string
+ * @buf: the buffer to extract from
+ * @dstp: the cpumask to set.
+ *
+ * Returns -errno, or 0 for success.
+ */
+static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
+{
+	char *nl = strchr(buf, '\n');
+	int len = nl ? nl - buf : strlen(buf);
+
+	return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+}
+
+/**
  * cpulist_parse - extract a cpumask from a user string of ranges
  * @buf: the buffer to extract from
  * @dstp: the cpumask to set.
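
cpumask_parse() takes the sysfs-style hex mask format, tolerating one trailing newline, whereas cpulist_parse() takes "0-3,8"-style range lists. A small usage sketch (assumed to run in process context; error reporting is illustrative):

	static int parse_user_cpumask(const char *buf)
	{
		cpumask_var_t mask;
		int ret;

		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;
		ret = cpumask_parse(buf, mask);	/* e.g. "f\n" -> CPUs 0-3 */
		if (!ret)
			pr_info("first cpu: %d\n", cpumask_first(mask));
		free_cpumask_var(mask);
		return ret;
	}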
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index a975de1ff59f..3bd46f766751 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -51,7 +51,7 @@ struct task_struct;
 extern void debug_show_all_locks(void);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void debug_check_no_locks_held(void);
+extern void debug_check_no_locks_held(struct task_struct *task);
 #else
 static inline void debug_show_all_locks(void)
 {
@@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len)
 }
 
 static inline void
-debug_check_no_locks_held(void)
+debug_check_no_locks_held(struct task_struct *task)
 {
 }
 #endif
diff --git a/include/linux/device.h b/include/linux/device.h
index 9d6464ea99c6..ee10d4e7be1a 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -302,6 +302,8 @@ void subsys_interface_unregister(struct subsys_interface *sif);
 
 int subsys_system_register(struct bus_type *subsys,
 			   const struct attribute_group **groups);
+int subsys_virtual_register(struct bus_type *subsys,
+			    const struct attribute_group **groups);
 
 /**
  * struct class - device classes
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 4fd4999ccb5b..0b763276f619 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -561,7 +561,6 @@ struct csrow_info {
 
 	u32 ue_count;	/* Uncorrectable Errors for this csrow */
 	u32 ce_count;	/* Correctable Errors for this csrow */
-	u32 nr_pages;	/* combined pages count of all channels */
 
 	struct mem_ctl_info *mci;	/* the parent */
 
@@ -676,11 +675,11 @@ struct mem_ctl_info {
 	 * sees memory sticks ("dimms"), and the ones that sees memory ranks.
 	 * All old memory controllers enumerate memories per rank, but most
 	 * of the recent drivers enumerate memories per DIMM, instead.
-	 * When the memory controller is per rank, mem_is_per_rank is true.
+	 * When the memory controller is per rank, csbased is true.
 	 */
 	unsigned n_layers;
 	struct edac_mc_layer *layers;
-	bool mem_is_per_rank;
+	bool csbased;
 
 	/*
 	 * DIMM info. Will eventually remove the entire csrows_info some day
@@ -741,8 +740,6 @@ struct mem_ctl_info {
 	u32 fake_inject_ue;
 	u16 fake_inject_count;
 #endif
-	__u8 csbased : 1,	/* csrow-based memory controller */
-	     __resv  : 7;
 };
 
 #endif
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 043a5cf8b5ba..e70df40d84f6 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -3,7 +3,6 @@
 #ifndef FREEZER_H_INCLUDED
 #define FREEZER_H_INCLUDED
 
-#include <linux/debug_locks.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/atomic.h>
@@ -49,8 +48,6 @@ extern void thaw_kernel_threads(void);
 
 static inline bool try_to_freeze(void)
 {
-	if (!(current->flags & PF_NOFREEZE))
-		debug_check_no_locks_held();
 	might_sleep();
 	if (likely(!freezing(current)))
 		return false;
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 729eded4b24f..2b93a9a5a1e6 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -50,4 +50,6 @@ static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
 	spin_unlock(&fs->lock);
 }
 
+extern bool current_chrooted(void);
+
 #endif /* _LINUX_FS_STRUCT_H */
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 61c97ae22e01..f09a0ae4d858 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -15,6 +15,7 @@
  */
 
 #include <asm/types.h>
+#include <linux/compiler.h>
 
 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
 #define GOLDEN_RATIO_PRIME_32 0x9e370001UL
@@ -31,7 +32,7 @@
 #error Wordsize not 32 or 64
 #endif
 
-static inline u64 hash_64(u64 val, unsigned int bits)
+static __always_inline u64 hash_64(u64 val, unsigned int bits)
 {
 	u64 hash = val;
 
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index f5dbce50466e..66017028dcb3 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -37,7 +37,7 @@ void irq_work_sync(struct irq_work *work);
 #ifdef CONFIG_IRQ_WORK
 bool irq_work_needs_cpu(void);
 #else
-static bool irq_work_needs_cpu(void) { return false; }
+static inline bool irq_work_needs_cpu(void) { return false; }
 #endif
 
 #endif /* _LINUX_IRQ_WORK_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 80d36874689b..79fdd80a42d4 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -390,7 +390,6 @@ extern struct pid *session_of_pgrp(struct pid *pgrp);
 unsigned long int_sqrt(unsigned long);
 
 extern void bust_spinlocks(int yes);
-extern void wake_up_klogd(void);
 extern int oops_in_progress;	/* If set, an oops, panic(), BUG() or die() is in progress */
 extern int panic_timeout;
 extern int panic_on_oops;
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 5b18ecde69b5..1aa4f13cdfa6 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -106,6 +106,29 @@ enum max77693_muic_reg {
 	MAX77693_MUIC_REG_END,
 };
 
+/* MAX77693 INTMASK1~2 Register */
+#define INTMASK1_ADC1K_SHIFT		3
+#define INTMASK1_ADCERR_SHIFT		2
+#define INTMASK1_ADCLOW_SHIFT		1
+#define INTMASK1_ADC_SHIFT		0
+#define INTMASK1_ADC1K_MASK		(1 << INTMASK1_ADC1K_SHIFT)
+#define INTMASK1_ADCERR_MASK		(1 << INTMASK1_ADCERR_SHIFT)
+#define INTMASK1_ADCLOW_MASK		(1 << INTMASK1_ADCLOW_SHIFT)
+#define INTMASK1_ADC_MASK		(1 << INTMASK1_ADC_SHIFT)
+
+#define INTMASK2_VIDRM_SHIFT		5
+#define INTMASK2_VBVOLT_SHIFT		4
+#define INTMASK2_DXOVP_SHIFT		3
+#define INTMASK2_DCDTMR_SHIFT		2
+#define INTMASK2_CHGDETRUN_SHIFT	1
+#define INTMASK2_CHGTYP_SHIFT		0
+#define INTMASK2_VIDRM_MASK		(1 << INTMASK2_VIDRM_SHIFT)
+#define INTMASK2_VBVOLT_MASK		(1 << INTMASK2_VBVOLT_SHIFT)
+#define INTMASK2_DXOVP_MASK		(1 << INTMASK2_DXOVP_SHIFT)
+#define INTMASK2_DCDTMR_MASK		(1 << INTMASK2_DCDTMR_SHIFT)
+#define INTMASK2_CHGDETRUN_MASK		(1 << INTMASK2_CHGDETRUN_SHIFT)
+#define INTMASK2_CHGTYP_MASK		(1 << INTMASK2_CHGTYP_SHIFT)
+
 /* MAX77693 MUIC - STATUS1~3 Register */
 #define STATUS1_ADC_SHIFT		(0)
 #define STATUS1_ADCLOW_SHIFT		(5)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7acc9dc73c9f..e19ff30ad0a2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -87,7 +87,6 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
 
-#define VM_POPULATE	0x00001000
 #define VM_LOCKED	0x00002000
 #define VM_IO		0x00004000	/* Memory mapped I/O or similar */
 
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 61c7a87e5d2b..9aa863da287f 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -79,8 +79,6 @@ calc_vm_flag_bits(unsigned long flags)
 {
 	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
 	       _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
-	       ((flags & MAP_LOCKED) ? (VM_LOCKED | VM_POPULATE) : 0) |
-	       (((flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE) ?
-		VM_POPULATE : 0);
+	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED   );
 }
 #endif /* _LINUX_MMAN_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ede274957e05..c74092eebf5c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -527,7 +527,7 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
-static inline unsigned zone_end_pfn(const struct zone *zone)
+static inline unsigned long zone_end_pfn(const struct zone *zone)
 {
 	return zone->zone_start_pfn + zone->spanned_pages;
 }
diff --git a/include/linux/mount.h b/include/linux/mount.h
index d7029f4a191a..73005f9957ea 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -47,6 +47,8 @@ struct mnt_namespace;
 
 #define MNT_INTERNAL	0x4000
 
+#define MNT_LOCK_READONLY	0x400000
+
 struct vfsmount {
 	struct dentry *mnt_root;	/* root of the mounted tree */
 	struct super_block *mnt_sb;	/* pointer to superblock */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 7ccb3c59ed60..ef52d9c91459 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -187,6 +187,13 @@ typedef enum {
  * This happens with the Renesas AG-AND chips, possibly others.
  */
 #define BBT_AUTO_REFRESH	0x00000080
+/*
+ * Chip requires ready check on read (for auto-incremented sequential read).
+ * True only for small page devices; large page devices do not support
+ * autoincrement.
+ */
+#define NAND_NEED_READRDY	0x00000100
+
 /* Chip does not allow subpage writes */
 #define NAND_NO_SUBPAGE_WRITE	0x00000200
 
diff --git a/include/linux/mxsfb.h b/include/linux/mxsfb.h
index f14943d55315..f80af8674342 100644
--- a/include/linux/mxsfb.h
+++ b/include/linux/mxsfb.h
@@ -24,8 +24,8 @@
 #define STMLCDIF_18BIT 2 /** pixel data bus to the display is of 18 bit width */
 #define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */
 
-#define FB_SYNC_DATA_ENABLE_HIGH_ACT	(1 << 6)
-#define FB_SYNC_DOTCLK_FAILING_ACT	(1 << 7) /* failing/negtive edge sampling */
+#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT	(1 << 6)
+#define MXSFB_SYNC_DOTCLK_FAILING_ACT	(1 << 7) /* failing/negtive edge sampling */
 
 struct mxsfb_platform_data {
 	struct fb_videomode *mode_list;
@@ -44,6 +44,9 @@ struct mxsfb_platform_data {
 	 * allocated. If specified,fb_size must also be specified.
 	 * fb_phys must be unused by Linux.
 	 */
+	u32 sync;		/* sync mask, contains MXSFB specifics not
+				 * carried in fb_info->var.sync
+				 */
 };
 
 #endif /* __LINUX_MXSFB_H */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c25cccaa555a..4fa3b0b9b071 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -137,6 +137,34 @@ enum {
 	NVME_LBAF_RP_DEGRADED	= 3,
 };
 
+struct nvme_smart_log {
+	__u8			critical_warning;
+	__u8			temperature[2];
+	__u8			avail_spare;
+	__u8			spare_thresh;
+	__u8			percent_used;
+	__u8			rsvd6[26];
+	__u8			data_units_read[16];
+	__u8			data_units_written[16];
+	__u8			host_reads[16];
+	__u8			host_writes[16];
+	__u8			ctrl_busy_time[16];
+	__u8			power_cycles[16];
+	__u8			power_on_hours[16];
+	__u8			unsafe_shutdowns[16];
+	__u8			media_errors[16];
+	__u8			num_err_log_entries[16];
+	__u8			rsvd192[320];
+};
+
+enum {
+	NVME_SMART_CRIT_SPARE		= 1 << 0,
+	NVME_SMART_CRIT_TEMPERATURE	= 1 << 1,
+	NVME_SMART_CRIT_RELIABILITY	= 1 << 2,
+	NVME_SMART_CRIT_MEDIA		= 1 << 3,
+	NVME_SMART_CRIT_VOLATILE_MEMORY	= 1 << 4,
+};
+
 struct nvme_lba_range_type {
 	__u8			type;
 	__u8			attributes;
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 1249a54d17e0..822171fcb1c8 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -134,6 +134,8 @@ extern int printk_delay_msec;
 extern int dmesg_restrict;
 extern int kptr_restrict;
 
+extern void wake_up_klogd(void);
+
 void log_buf_kexec_setup(void);
 void __init setup_log_buf(int early);
 #else
@@ -162,6 +164,10 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies,
 	return false;
 }
 
+static inline void wake_up_klogd(void)
+{
+}
+
 static inline void log_buf_kexec_setup(void)
 {
 }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d35d2b6ddbfb..e5c64f7b8c1d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1793,7 +1793,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
-#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
+#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
 #define PF_MCE_EARLY	0x08000000	/* Early kill for mce process policy */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 821c7f45d2a7..441f5bfdab8e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -500,7 +500,7 @@ struct sk_buff {
 	union {
 		__u32		mark;
 		__u32		dropcount;
-		__u32		avail_size;
+		__u32		reserved_tailroom;
 	};
 
 	sk_buff_data_t		inner_transport_header;
@@ -1288,11 +1288,13 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
 	 * do not lose pfmemalloc information as the pages would not be
 	 * allocated using __GFP_MEMALLOC.
 	 */
-	if (page->pfmemalloc && !page->mapping)
-		skb->pfmemalloc	= true;
 	frag->page.p		  = page;
 	frag->page_offset	  = off;
 	skb_frag_size_set(frag, size);
+
+	page = compound_head(page);
+	if (page->pfmemalloc && !page->mapping)
+		skb->pfmemalloc	= true;
 }
 
 /**
@@ -1447,7 +1449,10 @@ static inline int skb_tailroom(const struct sk_buff *skb)
  */
 static inline int skb_availroom(const struct sk_buff *skb)
 {
-	return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
+	if (skb_is_nonlinear(skb))
+		return 0;
+
+	return skb->end - skb->tail - skb->reserved_tailroom;
 }
 
 /**
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index f0bd7f90a90d..e3c0ae9bb1fa 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -44,7 +44,7 @@
 /* Adding event notification support elements */
 #define THERMAL_GENL_FAMILY_NAME		"thermal_event"
 #define THERMAL_GENL_VERSION			0x01
-#define THERMAL_GENL_MCAST_GROUP_NAME		"thermal_mc_group"
+#define THERMAL_GENL_MCAST_GROUP_NAME		"thermal_mc_grp"
 
 /* Default Thermal Governor */
 #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 9d81de123c90..42278bbf7a88 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -68,6 +68,7 @@ struct udp_sock {
 	 * For encapsulation sockets.
 	 */
 	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+	void (*encap_destroy)(struct sock *sk);
 };
 
 static inline struct udp_sock *udp_sk(const struct sock *sk)
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 3b8f9d4fc3fe..cc25b70af33c 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -127,6 +127,7 @@ struct cdc_ncm_ctx {
 	u16 connected;
 };
 
+extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf);
 extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
 extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
 extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign);
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 0a78df5f6cfd..59694b5e5e90 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -357,6 +357,7 @@ struct hc_driver {
 	 */
 	int	(*disable_usb3_lpm_timeout)(struct usb_hcd *,
 			struct usb_device *, enum usb3_link_state state);
+	int	(*find_raw_port_number)(struct usb_hcd *, int);
 };
 
 extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
@@ -396,6 +397,7 @@ extern int usb_hcd_is_primary_hcd(struct usb_hcd *hcd);
 extern int usb_add_hcd(struct usb_hcd *hcd,
 		unsigned int irqnum, unsigned long irqflags);
 extern void usb_remove_hcd(struct usb_hcd *hcd);
+extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1);
 
 struct platform_device;
 extern void usb_hcd_platform_shutdown(struct platform_device *dev);
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index ef9be7e1e190..1819b59aab2a 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -66,6 +66,7 @@
  *	port.
  * @flags: usb serial port flags
  * @write_wait: a wait_queue_head_t used by the port.
+ * @delta_msr_wait: modem-status-change wait queue
  * @work: work queue entry for the line discipline waking up.
  * @throttled: nonzero if the read urb is inactive to throttle the device
  * @throttle_req: nonzero if the tty wants to throttle us
@@ -112,6 +113,7 @@ struct usb_serial_port {
 
 	unsigned long		flags;
 	wait_queue_head_t	write_wait;
+	wait_queue_head_t	delta_msr_wait;
 	struct work_struct	work;
 	char			throttled;
 	char			throttle_req;
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
index 6f033a415ecb..5c295c26ad37 100644
--- a/include/linux/usb/ulpi.h
+++ b/include/linux/usb/ulpi.h
@@ -181,8 +181,16 @@
 
 /*-------------------------------------------------------------------------*/
 
+#if IS_ENABLED(CONFIG_USB_ULPI)
 struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
 				unsigned int flags);
+#else
+static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
+					      unsigned int flags)
+{
+	return NULL;
+}
+#endif
 
 #ifdef CONFIG_USB_ULPI_VIEWPORT
 /* access ops for controllers with a viewport register */
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 4ce009324933..b6b215f13b45 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -26,6 +26,8 @@ struct user_namespace {
 	kuid_t			owner;
 	kgid_t			group;
 	unsigned int		proc_inum;
+	bool			may_mount_sysfs;
+	bool			may_mount_proc;
 };
 
 extern struct user_namespace init_user_ns;
@@ -82,4 +84,6 @@ static inline void put_user_ns(struct user_namespace *ns)
 
 #endif
 
+void update_mnt_policy(struct user_namespace *userns);
+
 #endif /* _LINUX_USER_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 8afab27cdbc2..717975639378 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -11,6 +11,7 @@
11#include <linux/lockdep.h> 11#include <linux/lockdep.h>
12#include <linux/threads.h> 12#include <linux/threads.h>
13#include <linux/atomic.h> 13#include <linux/atomic.h>
14#include <linux/cpumask.h>
14 15
15struct workqueue_struct; 16struct workqueue_struct;
16 17
@@ -68,7 +69,7 @@ enum {
68 WORK_STRUCT_COLOR_BITS, 69 WORK_STRUCT_COLOR_BITS,
69 70
70 /* data contains off-queue information when !WORK_STRUCT_PWQ */ 71 /* data contains off-queue information when !WORK_STRUCT_PWQ */
71 WORK_OFFQ_FLAG_BASE = WORK_STRUCT_FLAG_BITS, 72 WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,
72 73
73 WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE), 74 WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE),
74 75
@@ -115,6 +116,20 @@ struct delayed_work {
115 int cpu; 116 int cpu;
116}; 117};
117 118
119/*
120 * A struct for workqueue attributes. This can be used to change
121 * attributes of an unbound workqueue.
122 *
123 * Unlike other fields, ->no_numa isn't a property of a worker_pool. It
124 * only modifies how apply_workqueue_attrs() select pools and thus doesn't
125 * participate in pool hash calculations or equality comparisons.
126 */
127struct workqueue_attrs {
128 int nice; /* nice level */
129 cpumask_var_t cpumask; /* allowed CPUs */
130 bool no_numa; /* disable NUMA affinity */
131};
132
118static inline struct delayed_work *to_delayed_work(struct work_struct *work) 133static inline struct delayed_work *to_delayed_work(struct work_struct *work)
119{ 134{
120 return container_of(work, struct delayed_work, work); 135 return container_of(work, struct delayed_work, work);
@@ -283,9 +298,10 @@ enum {
283 WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ 298 WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
284 WQ_HIGHPRI = 1 << 4, /* high priority */ 299 WQ_HIGHPRI = 1 << 4, /* high priority */
285 WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ 300 WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */
301 WQ_SYSFS = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
286 302
287 WQ_DRAINING = 1 << 6, /* internal: workqueue is draining */ 303 __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
288 WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */ 304 __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
289 305
290 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ 306 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
291 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ 307 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
@@ -388,7 +404,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
388 * Pointer to the allocated workqueue on success, %NULL on failure. 404 * Pointer to the allocated workqueue on success, %NULL on failure.
389 */ 405 */
390#define alloc_ordered_workqueue(fmt, flags, args...) \ 406#define alloc_ordered_workqueue(fmt, flags, args...) \
391 alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args) 407 alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
392 408
393#define create_workqueue(name) \ 409#define create_workqueue(name) \
394 alloc_workqueue((name), WQ_MEM_RECLAIM, 1) 410 alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
@@ -399,30 +415,23 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
399 415
400extern void destroy_workqueue(struct workqueue_struct *wq); 416extern void destroy_workqueue(struct workqueue_struct *wq);
401 417
418struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
419void free_workqueue_attrs(struct workqueue_attrs *attrs);
420int apply_workqueue_attrs(struct workqueue_struct *wq,
421 const struct workqueue_attrs *attrs);
422
402extern bool queue_work_on(int cpu, struct workqueue_struct *wq, 423extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
403 struct work_struct *work); 424 struct work_struct *work);
404extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
405extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 425extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
406 struct delayed_work *work, unsigned long delay); 426 struct delayed_work *work, unsigned long delay);
407extern bool queue_delayed_work(struct workqueue_struct *wq,
408 struct delayed_work *work, unsigned long delay);
409extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 427extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
410 struct delayed_work *dwork, unsigned long delay); 428 struct delayed_work *dwork, unsigned long delay);
411extern bool mod_delayed_work(struct workqueue_struct *wq,
412 struct delayed_work *dwork, unsigned long delay);
413 429
414extern void flush_workqueue(struct workqueue_struct *wq); 430extern void flush_workqueue(struct workqueue_struct *wq);
415extern void drain_workqueue(struct workqueue_struct *wq); 431extern void drain_workqueue(struct workqueue_struct *wq);
416extern void flush_scheduled_work(void); 432extern void flush_scheduled_work(void);
417 433
418extern bool schedule_work_on(int cpu, struct work_struct *work);
419extern bool schedule_work(struct work_struct *work);
420extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
421 unsigned long delay);
422extern bool schedule_delayed_work(struct delayed_work *work,
423 unsigned long delay);
424extern int schedule_on_each_cpu(work_func_t func); 434extern int schedule_on_each_cpu(work_func_t func);
425extern int keventd_up(void);
426 435
427int execute_in_process_context(work_func_t fn, struct execute_work *); 436int execute_in_process_context(work_func_t fn, struct execute_work *);
428 437
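The declarations above are the whole of the new attrs API: allocate a struct workqueue_attrs, fill it in, apply it, free it. A minimal sketch of a caller, assuming an existing WQ_UNBOUND workqueue (example_retune_wq() and the values it sets are hypothetical, not part of the patch):

	#include <linux/workqueue.h>
	#include <linux/cpumask.h>

	static int example_retune_wq(struct workqueue_struct *wq)
	{
		struct workqueue_attrs *attrs;
		int ret;

		attrs = alloc_workqueue_attrs(GFP_KERNEL);
		if (!attrs)
			return -ENOMEM;

		attrs->nice = -5;				/* raise worker priority */
		cpumask_copy(attrs->cpumask, cpumask_of(0));	/* pin workers to CPU0 */

		/* only meaningful for unbound (WQ_UNBOUND) workqueues */
		ret = apply_workqueue_attrs(wq, attrs);
		free_workqueue_attrs(attrs);
		return ret;
	}

Per the struct comment, this only makes sense for unbound workqueues; per-cpu workqueues keep their fixed per-cpu pools.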
@@ -435,9 +444,121 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
435 444
436extern void workqueue_set_max_active(struct workqueue_struct *wq, 445extern void workqueue_set_max_active(struct workqueue_struct *wq,
437 int max_active); 446 int max_active);
438extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq); 447extern bool current_is_workqueue_rescuer(void);
448extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
439extern unsigned int work_busy(struct work_struct *work); 449extern unsigned int work_busy(struct work_struct *work);
440 450
451/**
452 * queue_work - queue work on a workqueue
453 * @wq: workqueue to use
454 * @work: work to queue
455 *
456 * Returns %false if @work was already on a queue, %true otherwise.
457 *
458 * We queue the work to the CPU on which it was submitted, but if the CPU dies
459 * it can be processed by another CPU.
460 */
461static inline bool queue_work(struct workqueue_struct *wq,
462 struct work_struct *work)
463{
464 return queue_work_on(WORK_CPU_UNBOUND, wq, work);
465}
466
467/**
468 * queue_delayed_work - queue work on a workqueue after delay
469 * @wq: workqueue to use
470 * @dwork: delayable work to queue
471 * @delay: number of jiffies to wait before queueing
472 *
473 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
474 */
475static inline bool queue_delayed_work(struct workqueue_struct *wq,
476 struct delayed_work *dwork,
477 unsigned long delay)
478{
479 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
480}
481
482/**
483 * mod_delayed_work - modify delay of or queue a delayed work
484 * @wq: workqueue to use
485 * @dwork: work to queue
486 * @delay: number of jiffies to wait before queueing
487 *
488 * mod_delayed_work_on() on local CPU.
489 */
490static inline bool mod_delayed_work(struct workqueue_struct *wq,
491 struct delayed_work *dwork,
492 unsigned long delay)
493{
494 return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
495}
496
497/**
498 * schedule_work_on - put work task on a specific cpu
499 * @cpu: cpu to put the work task on
500 * @work: job to be done
501 *
502 * This puts a job on a specific cpu
503 */
504static inline bool schedule_work_on(int cpu, struct work_struct *work)
505{
506 return queue_work_on(cpu, system_wq, work);
507}
508
509/**
510 * schedule_work - put work task in global workqueue
511 * @work: job to be done
512 *
513 * Returns %false if @work was already on the kernel-global workqueue and
514 * %true otherwise.
515 *
516 * This puts a job in the kernel-global workqueue if it was not already
517 * queued and leaves it in the same position on the kernel-global
518 * workqueue otherwise.
519 */
520static inline bool schedule_work(struct work_struct *work)
521{
522 return queue_work(system_wq, work);
523}
524
525/**
526 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
527 * @cpu: cpu to use
528 * @dwork: job to be done
529 * @delay: number of jiffies to wait
530 *
531 * After waiting for a given time this puts a job in the kernel-global
532 * workqueue on the specified CPU.
533 */
534static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
535 unsigned long delay)
536{
537 return queue_delayed_work_on(cpu, system_wq, dwork, delay);
538}
539
540/**
541 * schedule_delayed_work - put work task in global workqueue after delay
542 * @dwork: job to be done
543 * @delay: number of jiffies to wait or 0 for immediate execution
544 *
545 * After waiting for a given time this puts a job in the kernel-global
546 * workqueue.
547 */
548static inline bool schedule_delayed_work(struct delayed_work *dwork,
549 unsigned long delay)
550{
551 return queue_delayed_work(system_wq, dwork, delay);
552}
553
554/**
555 * keventd_up - is workqueue initialized yet?
556 */
557static inline bool keventd_up(void)
558{
559 return system_wq != NULL;
560}
561
441/* 562/*
442 * Like above, but uses del_timer() instead of del_timer_sync(). This means, 563 * Like above, but uses del_timer() instead of del_timer_sync(). This means,
443 * if it returns 0 the timer function may be running and the queueing is in 564 * if it returns 0 the timer function may be running and the queueing is in
@@ -466,12 +587,12 @@ static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwo
466} 587}
467 588
468#ifndef CONFIG_SMP 589#ifndef CONFIG_SMP
469static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) 590static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
470{ 591{
471 return fn(arg); 592 return fn(arg);
472} 593}
473#else 594#else
474long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg); 595long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
475#endif /* CONFIG_SMP */ 596#endif /* CONFIG_SMP */
476 597
477#ifdef CONFIG_FREEZER 598#ifdef CONFIG_FREEZER
@@ -480,4 +601,11 @@ extern bool freeze_workqueues_busy(void);
480extern void thaw_workqueues(void); 601extern void thaw_workqueues(void);
481#endif /* CONFIG_FREEZER */ 602#endif /* CONFIG_FREEZER */
482 603
604#ifdef CONFIG_SYSFS
605int workqueue_sysfs_register(struct workqueue_struct *wq);
606#else /* CONFIG_SYSFS */
607static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
608{ return 0; }
609#endif /* CONFIG_SYSFS */
610
483#endif 611#endif
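That ends the include/linux/workqueue.h changes: the queue_work()/schedule_work() family moves from out-of-line exports to static inline wrappers around queue_work_on() and friends. Caller code is unaffected; a sketch of the usual pattern (example_work/example_fn are hypothetical, not from the patch):

	#include <linux/workqueue.h>
	#include <linux/printk.h>

	static void example_fn(struct work_struct *work)
	{
		pr_info("example work ran\n");
	}

	static DECLARE_WORK(example_work, example_fn);

	/* schedule_work() is now a static inline that forwards to
	 * queue_work(system_wq, ...), which in turn forwards to
	 * queue_work_on(WORK_CPU_UNBOUND, ...). */
	static void example_kick(void)
	{
		schedule_work(&example_work);
	}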
diff --git a/include/net/dst.h b/include/net/dst.h
index 853cda11e518..1f8fd109e225 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -413,13 +413,15 @@ static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
413 413
414static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) 414static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
415{ 415{
416 return dst->ops->neigh_lookup(dst, NULL, daddr); 416 struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
417 return IS_ERR(n) ? NULL : n;
417} 418}
418 419
419static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, 420static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
420 struct sk_buff *skb) 421 struct sk_buff *skb)
421{ 422{
422 return dst->ops->neigh_lookup(dst, skb, NULL); 423 struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);
424 return IS_ERR(n) ? NULL : n;
423} 425}
424 426
425static inline void dst_link_failure(struct sk_buff *skb) 427static inline void dst_link_failure(struct sk_buff *skb)
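The caller-visible effect of the dst.h change: a failed lookup now yields NULL instead of an ERR_PTR() value, so the common NULL-check-only pattern is safe again. A hypothetical caller sketch, not from the patch:

	#include <net/dst.h>
	#include <net/neighbour.h>

	static int example_use_neigh(struct dst_entry *dst, const void *daddr)
	{
		struct neighbour *n = dst_neigh_lookup(dst, daddr);

		if (!n)		/* before the fix, an ERR_PTR() passed this check */
			return -ENOENT;
		/* ... use n ... */
		neigh_release(n);
		return 0;
	}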
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
index 80461c1ae9ef..bb8271d487b7 100644
--- a/include/net/flow_keys.h
+++ b/include/net/flow_keys.h
@@ -9,6 +9,7 @@ struct flow_keys {
9 __be32 ports; 9 __be32 ports;
10 __be16 port16[2]; 10 __be16 port16[2];
11 }; 11 };
12 u16 thoff;
12 u8 ip_proto; 13 u8 ip_proto;
13}; 14};
14 15
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 76c3fe5ecc2e..0a1dcc2fa2f5 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -43,6 +43,13 @@ struct inet_frag_queue {
43 43
44#define INETFRAGS_HASHSZ 64 44#define INETFRAGS_HASHSZ 64
45 45
46/* averaged:
47 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
48 * (SKB_TRUELEN(0) + sizeof(struct ipq or
49 * struct frag_queue)), rounded up
50 */
51#define INETFRAGS_MAXDEPTH 128
52
46struct inet_frags { 53struct inet_frags {
47 struct hlist_head hash[INETFRAGS_HASHSZ]; 54 struct hlist_head hash[INETFRAGS_HASHSZ];
48 /* This rwlock is a global lock (separate per IPv4, IPv6 and 55 /* This rwlock is a global lock (separate per IPv4, IPv6 and
@@ -76,6 +83,8 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
76struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, 83struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
77 struct inet_frags *f, void *key, unsigned int hash) 84 struct inet_frags *f, void *key, unsigned int hash)
78 __releases(&f->lock); 85 __releases(&f->lock);
86void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
87 const char *prefix);
79 88
80static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) 89static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
81{ 90{
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 9497be1ad4c0..e49db91593a9 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -152,18 +152,16 @@ struct fib_result_nl {
152}; 152};
153 153
154#ifdef CONFIG_IP_ROUTE_MULTIPATH 154#ifdef CONFIG_IP_ROUTE_MULTIPATH
155
156#define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel]) 155#define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
157
158#define FIB_TABLE_HASHSZ 2
159
160#else /* CONFIG_IP_ROUTE_MULTIPATH */ 156#else /* CONFIG_IP_ROUTE_MULTIPATH */
161
162#define FIB_RES_NH(res) ((res).fi->fib_nh[0]) 157#define FIB_RES_NH(res) ((res).fi->fib_nh[0])
158#endif /* CONFIG_IP_ROUTE_MULTIPATH */
163 159
160#ifdef CONFIG_IP_MULTIPLE_TABLES
164#define FIB_TABLE_HASHSZ 256 161#define FIB_TABLE_HASHSZ 256
165 162#else
166#endif /* CONFIG_IP_ROUTE_MULTIPATH */ 163#define FIB_TABLE_HASHSZ 2
164#endif
167 165
168extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); 166extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
169 167
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 68c69d54d392..fce8e6b66d55 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -976,6 +976,7 @@ struct netns_ipvs {
976 int sysctl_sync_retries; 976 int sysctl_sync_retries;
977 int sysctl_nat_icmp_send; 977 int sysctl_nat_icmp_send;
978 int sysctl_pmtu_disc; 978 int sysctl_pmtu_disc;
979 int sysctl_backup_only;
979 980
980 /* ip_vs_lblc */ 981 /* ip_vs_lblc */
981 int sysctl_lblc_expiration; 982 int sysctl_lblc_expiration;
@@ -1067,6 +1068,12 @@ static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
1067 return ipvs->sysctl_pmtu_disc; 1068 return ipvs->sysctl_pmtu_disc;
1068} 1069}
1069 1070
1071static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
1072{
1073 return ipvs->sync_state & IP_VS_STATE_BACKUP &&
1074 ipvs->sysctl_backup_only;
1075}
1076
1070#else 1077#else
1071 1078
1072static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) 1079static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1114,6 +1121,11 @@ static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
1114 return 1; 1121 return 1;
1115} 1122}
1116 1123
1124static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
1125{
1126 return 0;
1127}
1128
1117#endif 1129#endif
1118 1130
1119/* 1131/*
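sysctl_backup_only() gives per-netns IPVS code a single predicate for "this node is acting purely as a sync backup": it is true only when the BACKUP sync state and the new sysctl are both set. A hypothetical caller sketch, not from the patch:

	#include <net/ip_vs.h>

	/* skip scheduling new connections on a backup-only node */
	static bool example_may_schedule(struct netns_ipvs *ipvs)
	{
		return !sysctl_backup_only(ipvs);
	}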
diff --git a/include/net/ipip.h b/include/net/ipip.h
index fd19625ff99d..982141c15200 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -77,15 +77,11 @@ static inline void tunnel_ip_select_ident(struct sk_buff *skb,
77{ 77{
78 struct iphdr *iph = ip_hdr(skb); 78 struct iphdr *iph = ip_hdr(skb);
79 79
80 if (iph->frag_off & htons(IP_DF)) 80 /* Use inner packet iph-id if possible. */
81 iph->id = 0; 81 if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
82 else { 82 iph->id = old_iph->id;
83 /* Use inner packet iph-id if possible. */ 83 else
84 if (skb->protocol == htons(ETH_P_IP) && old_iph->id) 84 __ip_select_ident(iph, dst,
85 iph->id = old_iph->id; 85 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
86 else
87 __ip_select_ident(iph, dst,
88 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
89 }
90} 86}
91#endif 87#endif
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 6a16fd2e70ed..464ea82e10db 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -183,7 +183,6 @@ DECLARE_EVENT_CLASS(writeback_work_class,
183DEFINE_EVENT(writeback_work_class, name, \ 183DEFINE_EVENT(writeback_work_class, name, \
184 TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \ 184 TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
185 TP_ARGS(bdi, work)) 185 TP_ARGS(bdi, work))
186DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
187DEFINE_WRITEBACK_WORK_EVENT(writeback_queue); 186DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
188DEFINE_WRITEBACK_WORK_EVENT(writeback_exec); 187DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
189DEFINE_WRITEBACK_WORK_EVENT(writeback_start); 188DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
@@ -222,12 +221,8 @@ DEFINE_EVENT(writeback_class, name, \
222 221
223DEFINE_WRITEBACK_EVENT(writeback_nowork); 222DEFINE_WRITEBACK_EVENT(writeback_nowork);
224DEFINE_WRITEBACK_EVENT(writeback_wake_background); 223DEFINE_WRITEBACK_EVENT(writeback_wake_background);
225DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
226DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
227DEFINE_WRITEBACK_EVENT(writeback_bdi_register); 224DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
228DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister); 225DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
229DEFINE_WRITEBACK_EVENT(writeback_thread_start);
230DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
231 226
232DECLARE_EVENT_CLASS(wbc_class, 227DECLARE_EVENT_CLASS(wbc_class,
233 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), 228 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
index 93f5fa94a431..afafd703ad92 100644
--- a/include/uapi/linux/packet_diag.h
+++ b/include/uapi/linux/packet_diag.h
@@ -33,9 +33,11 @@ enum {
33 PACKET_DIAG_TX_RING, 33 PACKET_DIAG_TX_RING,
34 PACKET_DIAG_FANOUT, 34 PACKET_DIAG_FANOUT,
35 35
36 PACKET_DIAG_MAX, 36 __PACKET_DIAG_MAX,
37}; 37};
38 38
39#define PACKET_DIAG_MAX (__PACKET_DIAG_MAX - 1)
40
39struct packet_diag_info { 41struct packet_diag_info {
40 __u32 pdi_index; 42 __u32 pdi_index;
41 __u32 pdi_version; 43 __u32 pdi_version;
diff --git a/include/uapi/linux/unix_diag.h b/include/uapi/linux/unix_diag.h
index b8a24941db21..b9e2a6a7446f 100644
--- a/include/uapi/linux/unix_diag.h
+++ b/include/uapi/linux/unix_diag.h
@@ -39,9 +39,11 @@ enum {
39 UNIX_DIAG_MEMINFO, 39 UNIX_DIAG_MEMINFO,
40 UNIX_DIAG_SHUTDOWN, 40 UNIX_DIAG_SHUTDOWN,
41 41
42 UNIX_DIAG_MAX, 42 __UNIX_DIAG_MAX,
43}; 43};
44 44
45#define UNIX_DIAG_MAX (__UNIX_DIAG_MAX - 1)
46
45struct unix_diag_vfs { 47struct unix_diag_vfs {
46 __u32 udiag_vfs_ino; 48 __u32 udiag_vfs_ino;
47 __u32 udiag_vfs_dev; 49 __u32 udiag_vfs_dev;
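packet_diag and unix_diag both switch here to the standard netlink sentinel idiom: __FOO_MAX is a private end marker and FOO_MAX names the highest valid attribute, so new attributes can be appended before the sentinel without breaking the ABI. A sketch of why parsers care, hypothetical and assuming the usual nla_parse() pattern of the era:

	#include <net/netlink.h>
	#include <linux/unix_diag.h>

	static int example_parse(const struct nlattr *head, int len)
	{
		/* tables are sized MAX + 1; an exclusive-end MAX would
		 * over- or under-size this as attributes are added */
		struct nlattr *tb[UNIX_DIAG_MAX + 1];

		return nla_parse(tb, UNIX_DIAG_MAX, head, len, NULL);
	}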
diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h
index 28447f1594fa..8deb22672ada 100644
--- a/include/video/atmel_lcdc.h
+++ b/include/video/atmel_lcdc.h
@@ -30,7 +30,6 @@
30 */ 30 */
31#define ATMEL_LCDC_WIRING_BGR 0 31#define ATMEL_LCDC_WIRING_BGR 0
32#define ATMEL_LCDC_WIRING_RGB 1 32#define ATMEL_LCDC_WIRING_RGB 1
33#define ATMEL_LCDC_WIRING_RGB555 2
34 33
35 34
36 /* LCD Controller info data structure, stored in device platform_data */ 35 /* LCD Controller info data structure, stored in device platform_data */
@@ -62,6 +61,7 @@ struct atmel_lcdfb_info {
62 void (*atmel_lcdfb_power_control)(int on); 61 void (*atmel_lcdfb_power_control)(int on);
63 struct fb_monspecs *default_monspecs; 62 struct fb_monspecs *default_monspecs;
64 u32 pseudo_palette[16]; 63 u32 pseudo_palette[16];
64 bool have_intensity_bit;
65}; 65};
66 66
67#define ATMEL_LCDC_DMABADDR1 0x00 67#define ATMEL_LCDC_DMABADDR1 0x00
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 01c3d62436ef..ffd4652de91c 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -138,11 +138,21 @@ struct blkif_request_discard {
138 uint8_t _pad3; 138 uint8_t _pad3;
139} __attribute__((__packed__)); 139} __attribute__((__packed__));
140 140
141struct blkif_request_other {
142 uint8_t _pad1;
143 blkif_vdev_t _pad2; /* only for read/write requests */
144#ifdef CONFIG_X86_64
145 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/
146#endif
147 uint64_t id; /* private guest value, echoed in resp */
148} __attribute__((__packed__));
149
141struct blkif_request { 150struct blkif_request {
142 uint8_t operation; /* BLKIF_OP_??? */ 151 uint8_t operation; /* BLKIF_OP_??? */
143 union { 152 union {
144 struct blkif_request_rw rw; 153 struct blkif_request_rw rw;
145 struct blkif_request_discard discard; 154 struct blkif_request_discard discard;
155 struct blkif_request_other other;
146 } u; 156 } u;
147} __attribute__((__packed__)); 157} __attribute__((__packed__));
148 158
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index 1844d31f4552..7000bb1f6e96 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -251,6 +251,12 @@ struct physdev_pci_device_add {
251 251
252#define PHYSDEVOP_pci_device_remove 26 252#define PHYSDEVOP_pci_device_remove 26
253#define PHYSDEVOP_restore_msi_ext 27 253#define PHYSDEVOP_restore_msi_ext 27
254/*
255 * Dom0 should use these two to announce that MMIO resources assigned to
256 * MSI-X capable devices won't (prepare) or may (release) change.
257 */
258#define PHYSDEVOP_prepare_msix 30
259#define PHYSDEVOP_release_msix 31
254struct physdev_pci_device { 260struct physdev_pci_device {
255 /* IN */ 261 /* IN */
256 uint16_t seg; 262 uint16_t seg;
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index e5c4f609f22c..e4e47f647446 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -330,8 +330,16 @@ static struct dentry *mqueue_mount(struct file_system_type *fs_type,
330 int flags, const char *dev_name, 330 int flags, const char *dev_name,
331 void *data) 331 void *data)
332{ 332{
333 if (!(flags & MS_KERNMOUNT)) 333 if (!(flags & MS_KERNMOUNT)) {
334 data = current->nsproxy->ipc_ns; 334 struct ipc_namespace *ns = current->nsproxy->ipc_ns;
335 /* Don't allow mounting unless the caller has CAP_SYS_ADMIN
336 * over the ipc namespace.
337 */
338 if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
339 return ERR_PTR(-EPERM);
340
341 data = ns;
342 }
335 return mount_ns(fs_type, flags, data, mqueue_fill_super); 343 return mount_ns(fs_type, flags, data, mqueue_fill_super);
336} 344}
337 345
@@ -840,7 +848,8 @@ out_putfd:
840 fd = error; 848 fd = error;
841 } 849 }
842 mutex_unlock(&root->d_inode->i_mutex); 850 mutex_unlock(&root->d_inode->i_mutex);
843 mnt_drop_write(mnt); 851 if (!ro)
852 mnt_drop_write(mnt);
844out_putname: 853out_putname:
845 putname(name); 854 putname(name);
846 return fd; 855 return fd;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a32f9432666c..3852d926322c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2224,11 +2224,11 @@ retry_find_task:
2224 tsk = tsk->group_leader; 2224 tsk = tsk->group_leader;
2225 2225
2226 /* 2226 /*
2227 * Workqueue threads may acquire PF_THREAD_BOUND and become 2227 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
2228 * trapped in a cpuset, or RT worker may be born in a cgroup 2228 * trapped in a cpuset, or RT worker may be born in a cgroup
2229 * with no rt_runtime allocated. Just say no. 2229 * with no rt_runtime allocated. Just say no.
2230 */ 2230 */
2231 if (tsk == kthreadd_task || (tsk->flags & PF_THREAD_BOUND)) { 2231 if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
2232 ret = -EINVAL; 2232 ret = -EINVAL;
2233 rcu_read_unlock(); 2233 rcu_read_unlock();
2234 goto out_unlock_cgroup; 2234 goto out_unlock_cgroup;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4f9dfe43ecbd..f22e94792707 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1388,16 +1388,16 @@ static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1388 1388
1389 cgroup_taskset_for_each(task, cgrp, tset) { 1389 cgroup_taskset_for_each(task, cgrp, tset) {
1390 /* 1390 /*
1391 * Kthreads bound to specific cpus cannot be moved to a new 1391 * Kthreads which disallow setaffinity shouldn't be moved
1392 * cpuset; we cannot change their cpu affinity and 1392 * to a new cpuset; we don't want to change their cpu
1393 * isolating such threads by their set of allowed nodes is 1393 * affinity and isolating such threads by their set of
1394 * unnecessary. Thus, cpusets are not applicable for such 1394 * allowed nodes is unnecessary. Thus, cpusets are not
1395 * threads. This prevents checking for success of 1395 * applicable for such threads. This prevents checking for
1396 * set_cpus_allowed_ptr() on all attached tasks before 1396 * success of set_cpus_allowed_ptr() on all attached tasks
1397 * cpus_allowed may be changed. 1397 * before cpus_allowed may be changed.
1398 */ 1398 */
1399 ret = -EINVAL; 1399 ret = -EINVAL;
1400 if (task->flags & PF_THREAD_BOUND) 1400 if (task->flags & PF_NO_SETAFFINITY)
1401 goto out_unlock; 1401 goto out_unlock;
1402 ret = security_task_setscheduler(task); 1402 ret = security_task_setscheduler(task);
1403 if (ret) 1403 if (ret)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b0cd86501c30..59412d037eed 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4434,12 +4434,15 @@ static void perf_event_task_event(struct perf_task_event *task_event)
4434 if (ctxn < 0) 4434 if (ctxn < 0)
4435 goto next; 4435 goto next;
4436 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 4436 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4437 if (ctx)
4438 perf_event_task_ctx(ctx, task_event);
4437 } 4439 }
4438 if (ctx)
4439 perf_event_task_ctx(ctx, task_event);
4440next: 4440next:
4441 put_cpu_ptr(pmu->pmu_cpu_context); 4441 put_cpu_ptr(pmu->pmu_cpu_context);
4442 } 4442 }
4443 if (task_event->task_ctx)
4444 perf_event_task_ctx(task_event->task_ctx, task_event);
4445
4443 rcu_read_unlock(); 4446 rcu_read_unlock();
4444} 4447}
4445 4448
@@ -5647,6 +5650,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
5647 event->attr.sample_period = NSEC_PER_SEC / freq; 5650 event->attr.sample_period = NSEC_PER_SEC / freq;
5648 hwc->sample_period = event->attr.sample_period; 5651 hwc->sample_period = event->attr.sample_period;
5649 local64_set(&hwc->period_left, hwc->sample_period); 5652 local64_set(&hwc->period_left, hwc->sample_period);
5653 hwc->last_period = hwc->sample_period;
5650 event->attr.freq = 0; 5654 event->attr.freq = 0;
5651 } 5655 }
5652} 5656}
diff --git a/kernel/exit.c b/kernel/exit.c
index 51e485ca9935..60bc027c61c3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -835,7 +835,7 @@ void do_exit(long code)
835 /* 835 /*
836 * Make sure we are holding no locks: 836 * Make sure we are holding no locks:
837 */ 837 */
838 debug_check_no_locks_held(); 838 debug_check_no_locks_held(tsk);
839 /* 839 /*
840 * We can do this unlocked here. The futex code uses this flag 840 * We can do this unlocked here. The futex code uses this flag
841 * just to verify whether the pi state cleanup has been done 841 * just to verify whether the pi state cleanup has been done
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 691dc2ef9baf..a2fbbb782bad 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -260,7 +260,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu)
260{ 260{
261 /* It's safe because the task is inactive. */ 261 /* It's safe because the task is inactive. */
262 do_set_cpus_allowed(p, cpumask_of(cpu)); 262 do_set_cpus_allowed(p, cpumask_of(cpu));
263 p->flags |= PF_THREAD_BOUND; 263 p->flags |= PF_NO_SETAFFINITY;
264} 264}
265 265
266/** 266/**
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 259db207b5d9..8a0efac4f99d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4088,7 +4088,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
4088} 4088}
4089EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); 4089EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
4090 4090
4091static void print_held_locks_bug(void) 4091static void print_held_locks_bug(struct task_struct *curr)
4092{ 4092{
4093 if (!debug_locks_off()) 4093 if (!debug_locks_off())
4094 return; 4094 return;
@@ -4097,21 +4097,22 @@ static void print_held_locks_bug(void)
4097 4097
4098 printk("\n"); 4098 printk("\n");
4099 printk("=====================================\n"); 4099 printk("=====================================\n");
4100 printk("[ BUG: %s/%d still has locks held! ]\n", 4100 printk("[ BUG: lock held at task exit time! ]\n");
4101 current->comm, task_pid_nr(current));
4102 print_kernel_ident(); 4101 print_kernel_ident();
4103 printk("-------------------------------------\n"); 4102 printk("-------------------------------------\n");
4104 lockdep_print_held_locks(current); 4103 printk("%s/%d is exiting with locks still held!\n",
4104 curr->comm, task_pid_nr(curr));
4105 lockdep_print_held_locks(curr);
4106
4105 printk("\nstack backtrace:\n"); 4107 printk("\nstack backtrace:\n");
4106 dump_stack(); 4108 dump_stack();
4107} 4109}
4108 4110
4109void debug_check_no_locks_held(void) 4111void debug_check_no_locks_held(struct task_struct *task)
4110{ 4112{
4111 if (unlikely(current->lockdep_depth > 0)) 4113 if (unlikely(task->lockdep_depth > 0))
4112 print_held_locks_bug(); 4114 print_held_locks_bug(task);
4113} 4115}
4114EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
4115 4116
4116void debug_show_all_locks(void) 4117void debug_show_all_locks(void)
4117{ 4118{
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index c1c3dc1c6023..bea15bdf82b0 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -181,6 +181,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
181 int nr; 181 int nr;
182 int rc; 182 int rc;
183 struct task_struct *task, *me = current; 183 struct task_struct *task, *me = current;
184 int init_pids = thread_group_leader(me) ? 1 : 2;
184 185
185 /* Don't allow any more processes into the pid namespace */ 186 /* Don't allow any more processes into the pid namespace */
186 disable_pid_allocation(pid_ns); 187 disable_pid_allocation(pid_ns);
@@ -230,7 +231,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
230 */ 231 */
231 for (;;) { 232 for (;;) {
232 set_current_state(TASK_UNINTERRUPTIBLE); 233 set_current_state(TASK_UNINTERRUPTIBLE);
233 if (pid_ns->nr_hashed == 1) 234 if (pid_ns->nr_hashed == init_pids)
234 break; 235 break;
235 schedule(); 236 schedule();
236 } 237 }
diff --git a/kernel/printk.c b/kernel/printk.c
index 0b31715f335a..abbdd9e2ac82 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -63,8 +63,6 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
63#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ 63#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
64#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ 64#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
65 65
66DECLARE_WAIT_QUEUE_HEAD(log_wait);
67
68int console_printk[4] = { 66int console_printk[4] = {
69 DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ 67 DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */
70 DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ 68 DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */
@@ -224,6 +222,7 @@ struct log {
224static DEFINE_RAW_SPINLOCK(logbuf_lock); 222static DEFINE_RAW_SPINLOCK(logbuf_lock);
225 223
226#ifdef CONFIG_PRINTK 224#ifdef CONFIG_PRINTK
225DECLARE_WAIT_QUEUE_HEAD(log_wait);
227/* the next printk record to read by syslog(READ) or /proc/kmsg */ 226/* the next printk record to read by syslog(READ) or /proc/kmsg */
228static u64 syslog_seq; 227static u64 syslog_seq;
229static u32 syslog_idx; 228static u32 syslog_idx;
@@ -1957,45 +1956,6 @@ int is_console_locked(void)
1957 return console_locked; 1956 return console_locked;
1958} 1957}
1959 1958
1960/*
1961 * Delayed printk version, for scheduler-internal messages:
1962 */
1963#define PRINTK_BUF_SIZE 512
1964
1965#define PRINTK_PENDING_WAKEUP 0x01
1966#define PRINTK_PENDING_SCHED 0x02
1967
1968static DEFINE_PER_CPU(int, printk_pending);
1969static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
1970
1971static void wake_up_klogd_work_func(struct irq_work *irq_work)
1972{
1973 int pending = __this_cpu_xchg(printk_pending, 0);
1974
1975 if (pending & PRINTK_PENDING_SCHED) {
1976 char *buf = __get_cpu_var(printk_sched_buf);
1977 printk(KERN_WARNING "[sched_delayed] %s", buf);
1978 }
1979
1980 if (pending & PRINTK_PENDING_WAKEUP)
1981 wake_up_interruptible(&log_wait);
1982}
1983
1984static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
1985 .func = wake_up_klogd_work_func,
1986 .flags = IRQ_WORK_LAZY,
1987};
1988
1989void wake_up_klogd(void)
1990{
1991 preempt_disable();
1992 if (waitqueue_active(&log_wait)) {
1993 this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
1994 irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
1995 }
1996 preempt_enable();
1997}
1998
1999static void console_cont_flush(char *text, size_t size) 1959static void console_cont_flush(char *text, size_t size)
2000{ 1960{
2001 unsigned long flags; 1961 unsigned long flags;
@@ -2458,6 +2418,44 @@ static int __init printk_late_init(void)
2458late_initcall(printk_late_init); 2418late_initcall(printk_late_init);
2459 2419
2460#if defined CONFIG_PRINTK 2420#if defined CONFIG_PRINTK
2421/*
2422 * Delayed printk version, for scheduler-internal messages:
2423 */
2424#define PRINTK_BUF_SIZE 512
2425
2426#define PRINTK_PENDING_WAKEUP 0x01
2427#define PRINTK_PENDING_SCHED 0x02
2428
2429static DEFINE_PER_CPU(int, printk_pending);
2430static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
2431
2432static void wake_up_klogd_work_func(struct irq_work *irq_work)
2433{
2434 int pending = __this_cpu_xchg(printk_pending, 0);
2435
2436 if (pending & PRINTK_PENDING_SCHED) {
2437 char *buf = __get_cpu_var(printk_sched_buf);
2438 printk(KERN_WARNING "[sched_delayed] %s", buf);
2439 }
2440
2441 if (pending & PRINTK_PENDING_WAKEUP)
2442 wake_up_interruptible(&log_wait);
2443}
2444
2445static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
2446 .func = wake_up_klogd_work_func,
2447 .flags = IRQ_WORK_LAZY,
2448};
2449
2450void wake_up_klogd(void)
2451{
2452 preempt_disable();
2453 if (waitqueue_active(&log_wait)) {
2454 this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
2455 irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
2456 }
2457 preempt_enable();
2458}
2461 2459
2462int printk_sched(const char *fmt, ...) 2460int printk_sched(const char *fmt, ...)
2463{ 2461{
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7f12624a393c..23606ee961b5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4126,6 +4126,10 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
4126 get_task_struct(p); 4126 get_task_struct(p);
4127 rcu_read_unlock(); 4127 rcu_read_unlock();
4128 4128
4129 if (p->flags & PF_NO_SETAFFINITY) {
4130 retval = -EINVAL;
4131 goto out_put_task;
4132 }
4129 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 4133 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4130 retval = -ENOMEM; 4134 retval = -ENOMEM;
4131 goto out_put_task; 4135 goto out_put_task;
@@ -4773,11 +4777,6 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
4773 goto out; 4777 goto out;
4774 } 4778 }
4775 4779
4776 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
4777 ret = -EINVAL;
4778 goto out;
4779 }
4780
4781 do_set_cpus_allowed(p, new_mask); 4780 do_set_cpus_allowed(p, new_mask);
4782 4781
4783 /* Can the task run on the task's current CPU? If so, we're done */ 4782 /* Can the task run on the task's current CPU? If so, we're done */
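With PF_NO_SETAFFINITY the rejection moves out of set_cpus_allowed_ptr() and up into sched_setaffinity(), so in-kernel affinity changes are no longer blocked while the syscall path refuses early with -EINVAL. A userspace-side sketch of the visible behavior (hypothetical program, not from the patch):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <errno.h>
	#include <stdio.h>
	#include <sys/types.h>

	/* tasks flagged PF_NO_SETAFFINITY, e.g. kthreads bound via
	 * __kthread_bind(), now fail the syscall with EINVAL */
	static int try_pin_to_cpu0(pid_t pid)
	{
		cpu_set_t set;

		CPU_ZERO(&set);
		CPU_SET(0, &set);
		if (sched_setaffinity(pid, sizeof(set), &set) < 0) {
			fprintf(stderr, "pid %d: %s\n", (int)pid,
				errno == EINVAL ? "affinity change disallowed"
						: "sched_setaffinity failed");
			return -1;
		}
		return 0;
	}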
diff --git a/kernel/sys.c b/kernel/sys.c
index 81f56445fba9..39c9c4a2949f 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2185,9 +2185,8 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2185 2185
2186char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; 2186char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
2187 2187
2188static int __orderly_poweroff(void) 2188static int __orderly_poweroff(bool force)
2189{ 2189{
2190 int argc;
2191 char **argv; 2190 char **argv;
2192 static char *envp[] = { 2191 static char *envp[] = {
2193 "HOME=/", 2192 "HOME=/",
@@ -2196,20 +2195,40 @@ static int __orderly_poweroff(void)
2196 }; 2195 };
2197 int ret; 2196 int ret;
2198 2197
2199 argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc); 2198 argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL);
2200 if (argv == NULL) { 2199 if (argv) {
2200 ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
2201 argv_free(argv);
2202 } else {
2201 printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n", 2203 printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
2202 __func__, poweroff_cmd); 2204 __func__, poweroff_cmd);
2203 return -ENOMEM; 2205 ret = -ENOMEM;
2204 } 2206 }
2205 2207
2206 ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC, 2208 if (ret && force) {
2207 NULL, NULL, NULL); 2209 printk(KERN_WARNING "Failed to start orderly shutdown: "
2208 argv_free(argv); 2210 "forcing the issue\n");
2211 /*
2212 * I guess this should try to kick off some daemon to sync and
2213 * poweroff asap. Or not even bother syncing if we're doing an
2214 * emergency shutdown?
2215 */
2216 emergency_sync();
2217 kernel_power_off();
2218 }
2209 2219
2210 return ret; 2220 return ret;
2211} 2221}
2212 2222
2223static bool poweroff_force;
2224
2225static void poweroff_work_func(struct work_struct *work)
2226{
2227 __orderly_poweroff(poweroff_force);
2228}
2229
2230static DECLARE_WORK(poweroff_work, poweroff_work_func);
2231
2213/** 2232/**
2214 * orderly_poweroff - Trigger an orderly system poweroff 2233 * orderly_poweroff - Trigger an orderly system poweroff
2215 * @force: force poweroff if command execution fails 2234 * @force: force poweroff if command execution fails
@@ -2219,21 +2238,9 @@ static int __orderly_poweroff(void)
2219 */ 2238 */
2220int orderly_poweroff(bool force) 2239int orderly_poweroff(bool force)
2221{ 2240{
2222 int ret = __orderly_poweroff(); 2241 if (force) /* do not override the pending "true" */
2223 2242 poweroff_force = true;
2224 if (ret && force) { 2243 schedule_work(&poweroff_work);
2225 printk(KERN_WARNING "Failed to start orderly shutdown: " 2244 return 0;
2226 "forcing the issue\n");
2227
2228 /*
2229 * I guess this should try to kick off some daemon to sync and
2230 * poweroff asap. Or not even bother syncing if we're doing an
2231 * emergency shutdown?
2232 */
2233 emergency_sync();
2234 kernel_power_off();
2235 }
2236
2237 return ret;
2238} 2245}
2239EXPORT_SYMBOL_GPL(orderly_poweroff); 2246EXPORT_SYMBOL_GPL(orderly_poweroff);
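Since orderly_poweroff() now only latches the force flag and schedules poweroff_work, the usermode helper (and the GFP_KERNEL allocation feeding it) runs from process context later, and the entry point becomes safe to call where sleeping is forbidden. A hypothetical driver sketch, not from the patch:

	#include <linux/interrupt.h>
	#include <linux/reboot.h>

	/* e.g. a thermal-alarm interrupt requesting an orderly shutdown */
	static irqreturn_t example_overheat_irq(int irq, void *dev_id)
	{
		orderly_poweroff(true);	/* just schedules poweroff_work */
		return IRQ_HANDLED;
	}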
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 2fb8cb88df8d..7f32fe0e52cd 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -67,7 +67,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
67 */ 67 */
68int tick_check_broadcast_device(struct clock_event_device *dev) 68int tick_check_broadcast_device(struct clock_event_device *dev)
69{ 69{
70 if ((tick_broadcast_device.evtdev && 70 if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
71 (tick_broadcast_device.evtdev &&
71 tick_broadcast_device.evtdev->rating >= dev->rating) || 72 tick_broadcast_device.evtdev->rating >= dev->rating) ||
72 (dev->features & CLOCK_EVT_FEAT_C3STOP)) 73 (dev->features & CLOCK_EVT_FEAT_C3STOP))
73 return 0; 74 return 0;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ab25b88aae56..6893d5a2bf08 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3104,8 +3104,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3104 continue; 3104 continue;
3105 } 3105 }
3106 3106
3107 hlist_del(&entry->node); 3107 hlist_del_rcu(&entry->node);
3108 call_rcu(&entry->rcu, ftrace_free_entry_rcu); 3108 call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
3109 } 3109 }
3110 } 3110 }
3111 __disable_ftrace_function_probe(); 3111 __disable_ftrace_function_probe();
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1f835a83cb2c..4f1dade56981 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -704,7 +704,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
704void 704void
705update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) 705update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
706{ 706{
707 struct ring_buffer *buf = tr->buffer; 707 struct ring_buffer *buf;
708 708
709 if (trace_stop_count) 709 if (trace_stop_count)
710 return; 710 return;
@@ -719,6 +719,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
719 719
720 arch_spin_lock(&ftrace_max_lock); 720 arch_spin_lock(&ftrace_max_lock);
721 721
722 buf = tr->buffer;
722 tr->buffer = max_tr.buffer; 723 tr->buffer = max_tr.buffer;
723 max_tr.buffer = buf; 724 max_tr.buffer = buf;
724 725
@@ -2880,11 +2881,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2880 return -EINVAL; 2881 return -EINVAL;
2881} 2882}
2882 2883
2883static void set_tracer_flags(unsigned int mask, int enabled) 2884/* Some tracers require overwrite to stay enabled */
2885int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
2886{
2887 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
2888 return -1;
2889
2890 return 0;
2891}
2892
2893int set_tracer_flag(unsigned int mask, int enabled)
2884{ 2894{
2885 /* do nothing if flag is already set */ 2895 /* do nothing if flag is already set */
2886 if (!!(trace_flags & mask) == !!enabled) 2896 if (!!(trace_flags & mask) == !!enabled)
2887 return; 2897 return 0;
2898
2899 /* Give the tracer a chance to approve the change */
2900 if (current_trace->flag_changed)
2901 if (current_trace->flag_changed(current_trace, mask, !!enabled))
2902 return -EINVAL;
2888 2903
2889 if (enabled) 2904 if (enabled)
2890 trace_flags |= mask; 2905 trace_flags |= mask;
@@ -2894,18 +2909,24 @@ static void set_tracer_flags(unsigned int mask, int enabled)
2894 if (mask == TRACE_ITER_RECORD_CMD) 2909 if (mask == TRACE_ITER_RECORD_CMD)
2895 trace_event_enable_cmd_record(enabled); 2910 trace_event_enable_cmd_record(enabled);
2896 2911
2897 if (mask == TRACE_ITER_OVERWRITE) 2912 if (mask == TRACE_ITER_OVERWRITE) {
2898 ring_buffer_change_overwrite(global_trace.buffer, enabled); 2913 ring_buffer_change_overwrite(global_trace.buffer, enabled);
2914#ifdef CONFIG_TRACER_MAX_TRACE
2915 ring_buffer_change_overwrite(max_tr.buffer, enabled);
2916#endif
2917 }
2899 2918
2900 if (mask == TRACE_ITER_PRINTK) 2919 if (mask == TRACE_ITER_PRINTK)
2901 trace_printk_start_stop_comm(enabled); 2920 trace_printk_start_stop_comm(enabled);
2921
2922 return 0;
2902} 2923}
2903 2924
2904static int trace_set_options(char *option) 2925static int trace_set_options(char *option)
2905{ 2926{
2906 char *cmp; 2927 char *cmp;
2907 int neg = 0; 2928 int neg = 0;
2908 int ret = 0; 2929 int ret = -ENODEV;
2909 int i; 2930 int i;
2910 2931
2911 cmp = strstrip(option); 2932 cmp = strstrip(option);
@@ -2915,19 +2936,20 @@ static int trace_set_options(char *option)
2915 cmp += 2; 2936 cmp += 2;
2916 } 2937 }
2917 2938
2939 mutex_lock(&trace_types_lock);
2940
2918 for (i = 0; trace_options[i]; i++) { 2941 for (i = 0; trace_options[i]; i++) {
2919 if (strcmp(cmp, trace_options[i]) == 0) { 2942 if (strcmp(cmp, trace_options[i]) == 0) {
2920 set_tracer_flags(1 << i, !neg); 2943 ret = set_tracer_flag(1 << i, !neg);
2921 break; 2944 break;
2922 } 2945 }
2923 } 2946 }
2924 2947
2925 /* If no option could be set, test the specific tracer options */ 2948 /* If no option could be set, test the specific tracer options */
2926 if (!trace_options[i]) { 2949 if (!trace_options[i])
2927 mutex_lock(&trace_types_lock);
2928 ret = set_tracer_option(current_trace, cmp, neg); 2950 ret = set_tracer_option(current_trace, cmp, neg);
2929 mutex_unlock(&trace_types_lock); 2951
2930 } 2952 mutex_unlock(&trace_types_lock);
2931 2953
2932 return ret; 2954 return ret;
2933} 2955}
@@ -2937,6 +2959,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2937 size_t cnt, loff_t *ppos) 2959 size_t cnt, loff_t *ppos)
2938{ 2960{
2939 char buf[64]; 2961 char buf[64];
2962 int ret;
2940 2963
2941 if (cnt >= sizeof(buf)) 2964 if (cnt >= sizeof(buf))
2942 return -EINVAL; 2965 return -EINVAL;
@@ -2946,7 +2969,9 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2946 2969
2947 buf[cnt] = 0; 2970 buf[cnt] = 0;
2948 2971
2949 trace_set_options(buf); 2972 ret = trace_set_options(buf);
2973 if (ret < 0)
2974 return ret;
2950 2975
2951 *ppos += cnt; 2976 *ppos += cnt;
2952 2977
@@ -3250,6 +3275,9 @@ static int tracing_set_tracer(const char *buf)
3250 goto out; 3275 goto out;
3251 3276
3252 trace_branch_disable(); 3277 trace_branch_disable();
3278
3279 current_trace->enabled = false;
3280
3253 if (current_trace->reset) 3281 if (current_trace->reset)
3254 current_trace->reset(tr); 3282 current_trace->reset(tr);
3255 3283
@@ -3294,6 +3322,7 @@ static int tracing_set_tracer(const char *buf)
3294 } 3322 }
3295 3323
3296 current_trace = t; 3324 current_trace = t;
3325 current_trace->enabled = true;
3297 trace_branch_enable(tr); 3326 trace_branch_enable(tr);
3298 out: 3327 out:
3299 mutex_unlock(&trace_types_lock); 3328 mutex_unlock(&trace_types_lock);
@@ -4780,7 +4809,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
4780 4809
4781 if (val != 0 && val != 1) 4810 if (val != 0 && val != 1)
4782 return -EINVAL; 4811 return -EINVAL;
4783 set_tracer_flags(1 << index, val); 4812
4813 mutex_lock(&trace_types_lock);
4814 ret = set_tracer_flag(1 << index, val);
4815 mutex_unlock(&trace_types_lock);
4816
4817 if (ret < 0)
4818 return ret;
4784 4819
4785 *ppos += cnt; 4820 *ppos += cnt;
4786 4821
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 57d7e5397d56..2081971367ea 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -283,11 +283,15 @@ struct tracer {
283 enum print_line_t (*print_line)(struct trace_iterator *iter); 283 enum print_line_t (*print_line)(struct trace_iterator *iter);
284 /* If you handled the flag setting, return 0 */ 284 /* If you handled the flag setting, return 0 */
285 int (*set_flag)(u32 old_flags, u32 bit, int set); 285 int (*set_flag)(u32 old_flags, u32 bit, int set);
286 /* Return 0 if OK with change, else return non-zero */
287 int (*flag_changed)(struct tracer *tracer,
288 u32 mask, int set);
286 struct tracer *next; 289 struct tracer *next;
287 struct tracer_flags *flags; 290 struct tracer_flags *flags;
288 bool print_max; 291 bool print_max;
289 bool use_max_tr; 292 bool use_max_tr;
290 bool allocated_snapshot; 293 bool allocated_snapshot;
294 bool enabled;
291}; 295};
292 296
293 297
@@ -943,6 +947,8 @@ extern const char *__stop___trace_bprintk_fmt[];
943 947
944void trace_printk_init_buffers(void); 948void trace_printk_init_buffers(void);
945void trace_printk_start_comm(void); 949void trace_printk_start_comm(void);
950int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
951int set_tracer_flag(unsigned int mask, int enabled);
946 952
947#undef FTRACE_ENTRY 953#undef FTRACE_ENTRY
948#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ 954#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
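The new ->flag_changed hook gives a tracer veto power over trace-option flips: a non-zero return makes set_tracer_flag() fail with -EINVAL. A sketch of a hook with the same shape as trace_keep_overwrite() above (hypothetical; it would live in a tracer's .c file next to kernel/trace/trace.h):

	/* refuse to clear TRACE_ITER_OVERWRITE while this tracer runs */
	static int example_flag_changed(struct tracer *tracer, u32 mask, int set)
	{
		if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
			return -1;	/* veto: set_tracer_flag() returns -EINVAL */

		return 0;
	}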
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 713a2cac4881..443b25b43b4f 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -32,7 +32,7 @@ enum {
32 32
33static int trace_type __read_mostly; 33static int trace_type __read_mostly;
34 34
35static int save_lat_flag; 35static int save_flags;
36 36
37static void stop_irqsoff_tracer(struct trace_array *tr, int graph); 37static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
38static int start_irqsoff_tracer(struct trace_array *tr, int graph); 38static int start_irqsoff_tracer(struct trace_array *tr, int graph);
@@ -558,8 +558,11 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
558 558
559static void __irqsoff_tracer_init(struct trace_array *tr) 559static void __irqsoff_tracer_init(struct trace_array *tr)
560{ 560{
561 save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; 561 save_flags = trace_flags;
562 trace_flags |= TRACE_ITER_LATENCY_FMT; 562
563 /* non-overwrite screws up the latency tracers */
564 set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
565 set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
563 566
564 tracing_max_latency = 0; 567 tracing_max_latency = 0;
565 irqsoff_trace = tr; 568 irqsoff_trace = tr;
@@ -573,10 +576,13 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
573 576
574static void irqsoff_tracer_reset(struct trace_array *tr) 577static void irqsoff_tracer_reset(struct trace_array *tr)
575{ 578{
579 int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
580 int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
581
576 stop_irqsoff_tracer(tr, is_graph()); 582 stop_irqsoff_tracer(tr, is_graph());
577 583
578 if (!save_lat_flag) 584 set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
579 trace_flags &= ~TRACE_ITER_LATENCY_FMT; 585 set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
580} 586}
581 587
582static void irqsoff_tracer_start(struct trace_array *tr) 588static void irqsoff_tracer_start(struct trace_array *tr)
@@ -609,6 +615,7 @@ static struct tracer irqsoff_tracer __read_mostly =
609 .print_line = irqsoff_print_line, 615 .print_line = irqsoff_print_line,
610 .flags = &tracer_flags, 616 .flags = &tracer_flags,
611 .set_flag = irqsoff_set_flag, 617 .set_flag = irqsoff_set_flag,
618 .flag_changed = trace_keep_overwrite,
612#ifdef CONFIG_FTRACE_SELFTEST 619#ifdef CONFIG_FTRACE_SELFTEST
613 .selftest = trace_selftest_startup_irqsoff, 620 .selftest = trace_selftest_startup_irqsoff,
614#endif 621#endif
@@ -642,6 +649,7 @@ static struct tracer preemptoff_tracer __read_mostly =
642 .print_line = irqsoff_print_line, 649 .print_line = irqsoff_print_line,
643 .flags = &tracer_flags, 650 .flags = &tracer_flags,
644 .set_flag = irqsoff_set_flag, 651 .set_flag = irqsoff_set_flag,
652 .flag_changed = trace_keep_overwrite,
645#ifdef CONFIG_FTRACE_SELFTEST 653#ifdef CONFIG_FTRACE_SELFTEST
646 .selftest = trace_selftest_startup_preemptoff, 654 .selftest = trace_selftest_startup_preemptoff,
647#endif 655#endif
@@ -677,6 +685,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
677 .print_line = irqsoff_print_line, 685 .print_line = irqsoff_print_line,
678 .flags = &tracer_flags, 686 .flags = &tracer_flags,
679 .set_flag = irqsoff_set_flag, 687 .set_flag = irqsoff_set_flag,
688 .flag_changed = trace_keep_overwrite,
680#ifdef CONFIG_FTRACE_SELFTEST 689#ifdef CONFIG_FTRACE_SELFTEST
681 .selftest = trace_selftest_startup_preemptirqsoff, 690 .selftest = trace_selftest_startup_preemptirqsoff,
682#endif 691#endif
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 75aa97fbe1a1..fde652c9a511 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr);
36static int wakeup_graph_entry(struct ftrace_graph_ent *trace); 36static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
37static void wakeup_graph_return(struct ftrace_graph_ret *trace); 37static void wakeup_graph_return(struct ftrace_graph_ret *trace);
38 38
39static int save_lat_flag; 39static int save_flags;
40 40
41#define TRACE_DISPLAY_GRAPH 1 41#define TRACE_DISPLAY_GRAPH 1
42 42
@@ -540,8 +540,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
540 540
541static int __wakeup_tracer_init(struct trace_array *tr) 541static int __wakeup_tracer_init(struct trace_array *tr)
542{ 542{
543 save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; 543 save_flags = trace_flags;
544 trace_flags |= TRACE_ITER_LATENCY_FMT; 544
545 /* non-overwrite screws up the latency tracers */
546 set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
547 set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
545 548
546 tracing_max_latency = 0; 549 tracing_max_latency = 0;
547 wakeup_trace = tr; 550 wakeup_trace = tr;
@@ -563,12 +566,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
563 566
564static void wakeup_tracer_reset(struct trace_array *tr) 567static void wakeup_tracer_reset(struct trace_array *tr)
565{ 568{
569 int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
570 int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
571
566 stop_wakeup_tracer(tr); 572 stop_wakeup_tracer(tr);
567 /* make sure we put back any tasks we are tracing */ 573 /* make sure we put back any tasks we are tracing */
568 wakeup_reset(tr); 574 wakeup_reset(tr);
569 575
570 if (!save_lat_flag) 576 set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
571 trace_flags &= ~TRACE_ITER_LATENCY_FMT; 577 set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
572} 578}
573 579
574static void wakeup_tracer_start(struct trace_array *tr) 580static void wakeup_tracer_start(struct trace_array *tr)
@@ -594,6 +600,7 @@ static struct tracer wakeup_tracer __read_mostly =
594 .print_line = wakeup_print_line, 600 .print_line = wakeup_print_line,
595 .flags = &tracer_flags, 601 .flags = &tracer_flags,
596 .set_flag = wakeup_set_flag, 602 .set_flag = wakeup_set_flag,
603 .flag_changed = trace_keep_overwrite,
597#ifdef CONFIG_FTRACE_SELFTEST 604#ifdef CONFIG_FTRACE_SELFTEST
598 .selftest = trace_selftest_startup_wakeup, 605 .selftest = trace_selftest_startup_wakeup,
599#endif 606#endif
@@ -615,6 +622,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
615 .print_line = wakeup_print_line, 622 .print_line = wakeup_print_line,
616 .flags = &tracer_flags, 623 .flags = &tracer_flags,
617 .set_flag = wakeup_set_flag, 624 .set_flag = wakeup_set_flag,
625 .flag_changed = trace_keep_overwrite,
618#ifdef CONFIG_FTRACE_SELFTEST 626#ifdef CONFIG_FTRACE_SELFTEST
619 .selftest = trace_selftest_startup_wakeup, 627 .selftest = trace_selftest_startup_wakeup,
620#endif 628#endif
diff --git a/kernel/user.c b/kernel/user.c
index e81978e8c03b..8e635a18ab52 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -51,6 +51,8 @@ struct user_namespace init_user_ns = {
51 .owner = GLOBAL_ROOT_UID, 51 .owner = GLOBAL_ROOT_UID,
52 .group = GLOBAL_ROOT_GID, 52 .group = GLOBAL_ROOT_GID,
53 .proc_inum = PROC_USER_INIT_INO, 53 .proc_inum = PROC_USER_INIT_INO,
54 .may_mount_sysfs = true,
55 .may_mount_proc = true,
54}; 56};
55EXPORT_SYMBOL_GPL(init_user_ns); 57EXPORT_SYMBOL_GPL(init_user_ns);
56 58
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index b14f4d342043..a54f26f82eb2 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -61,6 +61,15 @@ int create_user_ns(struct cred *new)
61 kgid_t group = new->egid; 61 kgid_t group = new->egid;
62 int ret; 62 int ret;
63 63
64 /*
65 * Verify that we cannot violate the file-access policy specified
66 * by the root directory, by verifying that the root directory
67 * is at the root of the mount namespace (which allows all files
68 * to be accessed).
69 */
70 if (current_chrooted())
71 return -EPERM;
72
64 /* The creator needs a mapping in the parent user namespace 73 /* The creator needs a mapping in the parent user namespace
65 * or else we won't be able to reasonably tell userspace who 74 * or else we won't be able to reasonably tell userspace who
66 * created a user_namespace. 75 * created a user_namespace.
@@ -87,6 +96,8 @@ int create_user_ns(struct cred *new)
87 96
88 set_cred_user_ns(new, ns); 97 set_cred_user_ns(new, ns);
89 98
99 update_mnt_policy(ns);
100
90 return 0; 101 return 0;
91} 102}
92 103
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 55fac5b991b7..dd2a4c49a39a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,7 +41,11 @@
41#include <linux/debug_locks.h> 41#include <linux/debug_locks.h>
42#include <linux/lockdep.h> 42#include <linux/lockdep.h>
43#include <linux/idr.h> 43#include <linux/idr.h>
44#include <linux/jhash.h>
44#include <linux/hashtable.h> 45#include <linux/hashtable.h>
46#include <linux/rculist.h>
47#include <linux/nodemask.h>
48#include <linux/moduleparam.h>
45 49
46#include "workqueue_internal.h" 50#include "workqueue_internal.h"
47 51
@@ -58,12 +62,11 @@ enum {
58 * %WORKER_UNBOUND set and concurrency management disabled, and may 62 * %WORKER_UNBOUND set and concurrency management disabled, and may
59 * be executing on any CPU. The pool behaves as an unbound one. 63 * be executing on any CPU. The pool behaves as an unbound one.
60 * 64 *
61 * Note that DISASSOCIATED can be flipped only while holding 65 * Note that DISASSOCIATED should be flipped only while holding
62 * assoc_mutex to avoid changing binding state while 66 * manager_mutex to avoid changing binding state while
63 * create_worker() is in progress. 67 * create_worker() is in progress.
64 */ 68 */
65 POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ 69 POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
66 POOL_MANAGING_WORKERS = 1 << 1, /* managing workers */
67 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ 70 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
68 POOL_FREEZING = 1 << 3, /* freeze in progress */ 71 POOL_FREEZING = 1 << 3, /* freeze in progress */
69 72
@@ -74,12 +77,14 @@ enum {
74 WORKER_PREP = 1 << 3, /* preparing to run works */ 77 WORKER_PREP = 1 << 3, /* preparing to run works */
75 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */ 78 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
76 WORKER_UNBOUND = 1 << 7, /* worker is unbound */ 79 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
80 WORKER_REBOUND = 1 << 8, /* worker was rebound */
77 81
78 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_UNBOUND | 82 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE |
79 WORKER_CPU_INTENSIVE, 83 WORKER_UNBOUND | WORKER_REBOUND,
80 84
81 NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */ 85 NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
82 86
87 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
83 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */ 88 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
84 89
85 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ 90 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
@@ -97,6 +102,8 @@ enum {
97 */ 102 */
98 RESCUER_NICE_LEVEL = -20, 103 RESCUER_NICE_LEVEL = -20,
99 HIGHPRI_NICE_LEVEL = -20, 104 HIGHPRI_NICE_LEVEL = -20,
105
106 WQ_NAME_LEN = 24,
100}; 107};
101 108
102/* 109/*
@@ -115,16 +122,26 @@ enum {
115 * cpu or grabbing pool->lock is enough for read access. If 122 * cpu or grabbing pool->lock is enough for read access. If
116 * POOL_DISASSOCIATED is set, it's identical to L. 123 * POOL_DISASSOCIATED is set, it's identical to L.
117 * 124 *
118 * F: wq->flush_mutex protected. 125 * MG: pool->manager_mutex and pool->lock protected. Writes require both
126 * locks. Reads can happen under either lock.
127 *
128 * PL: wq_pool_mutex protected.
129 *
130 * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
131 *
132 * WQ: wq->mutex protected.
119 * 133 *
120 * W: workqueue_lock protected. 134 * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
135 *
136 * MD: wq_mayday_lock protected.
121 */ 137 */
122 138
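To make the new labels concrete, here is a minimal sketch of a PR-labelled read (peek_pool() is a hypothetical helper, not part of the patch): writes to worker_pool_idr need wq_pool_mutex, while readers only need a sched-RCU read-side section.

	/* PR read side: sched-RCU alone is enough for the lookup itself. */
	static struct worker_pool *peek_pool(int pool_id)
	{
		struct worker_pool *pool;

		rcu_read_lock_sched();
		pool = idr_find(&worker_pool_idr, pool_id);
		rcu_read_unlock_sched();
		/* @pool may be gone once the RCU section ends; callers that
		 * need it beyond this point must keep it online themselves. */
		return pool;
	}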
123/* struct worker is defined in workqueue_internal.h */ 139/* struct worker is defined in workqueue_internal.h */
124 140
125struct worker_pool { 141struct worker_pool {
126 spinlock_t lock; /* the pool lock */ 142 spinlock_t lock; /* the pool lock */
127 unsigned int cpu; /* I: the associated cpu */ 143 int cpu; /* I: the associated cpu */
144 int node; /* I: the associated node ID */
128 int id; /* I: pool ID */ 145 int id; /* I: pool ID */
129 unsigned int flags; /* X: flags */ 146 unsigned int flags; /* X: flags */
130 147
@@ -138,12 +155,18 @@ struct worker_pool {
138 struct timer_list idle_timer; /* L: worker idle timeout */ 155 struct timer_list idle_timer; /* L: worker idle timeout */
139 struct timer_list mayday_timer; /* L: SOS timer for workers */ 156 struct timer_list mayday_timer; /* L: SOS timer for workers */
140 157
141 /* workers are chained either in busy_hash or idle_list */ 158 /* a worker is either on busy_hash or idle_list, or the manager */
142 DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER); 159 DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
143 /* L: hash of busy workers */ 160 /* L: hash of busy workers */
144 161
145 struct mutex assoc_mutex; /* protect POOL_DISASSOCIATED */ 162 /* see manage_workers() for details on the two manager mutexes */
146 struct ida worker_ida; /* L: for worker IDs */ 163 struct mutex manager_arb; /* manager arbitration */
164 struct mutex manager_mutex; /* manager exclusion */
165 struct idr worker_idr; /* MG: worker IDs and iteration */
166
167 struct workqueue_attrs *attrs; /* I: worker attributes */
168 struct hlist_node hash_node; /* PL: unbound_pool_hash node */
169 int refcnt; /* PL: refcnt for unbound pools */
147 170
148 /* 171 /*
149 * The current concurrency level. As it's likely to be accessed 172 * The current concurrency level. As it's likely to be accessed
@@ -151,6 +174,12 @@ struct worker_pool {
151 * cacheline. 174 * cacheline.
152 */ 175 */
153 atomic_t nr_running ____cacheline_aligned_in_smp; 176 atomic_t nr_running ____cacheline_aligned_in_smp;
177
178 /*
179 * Destruction of pool is sched-RCU protected to allow dereferences
180 * from get_work_pool().
181 */
182 struct rcu_head rcu;
154} ____cacheline_aligned_in_smp; 183} ____cacheline_aligned_in_smp;
155 184
156/* 185/*
@@ -164,75 +193,107 @@ struct pool_workqueue {
164 struct workqueue_struct *wq; /* I: the owning workqueue */ 193 struct workqueue_struct *wq; /* I: the owning workqueue */
165 int work_color; /* L: current color */ 194 int work_color; /* L: current color */
166 int flush_color; /* L: flushing color */ 195 int flush_color; /* L: flushing color */
196 int refcnt; /* L: reference count */
167 int nr_in_flight[WORK_NR_COLORS]; 197 int nr_in_flight[WORK_NR_COLORS];
168 /* L: nr of in_flight works */ 198 /* L: nr of in_flight works */
169 int nr_active; /* L: nr of active works */ 199 int nr_active; /* L: nr of active works */
170 int max_active; /* L: max active works */ 200 int max_active; /* L: max active works */
171 struct list_head delayed_works; /* L: delayed works */ 201 struct list_head delayed_works; /* L: delayed works */
172}; 202 struct list_head pwqs_node; /* WR: node on wq->pwqs */
203 struct list_head mayday_node; /* MD: node on wq->maydays */
204
205 /*
206 * Release of unbound pwq is punted to system_wq. See put_pwq()
207 * and pwq_unbound_release_workfn() for details. pool_workqueue
208 * itself is also sched-RCU protected so that the first pwq can be
209 * determined without grabbing wq->mutex.
210 */
211 struct work_struct unbound_release_work;
212 struct rcu_head rcu;
213} __aligned(1 << WORK_STRUCT_FLAG_BITS);
173 214
174/* 215/*
175 * Structure used to wait for workqueue flush. 216 * Structure used to wait for workqueue flush.
176 */ 217 */
177struct wq_flusher { 218struct wq_flusher {
178 struct list_head list; /* F: list of flushers */ 219 struct list_head list; /* WQ: list of flushers */
179 int flush_color; /* F: flush color waiting for */ 220 int flush_color; /* WQ: flush color waiting for */
180 struct completion done; /* flush completion */ 221 struct completion done; /* flush completion */
181}; 222};
182 223
183/* 224struct wq_device;
184 * All cpumasks are assumed to be always set on UP and thus can't be
185 * used to determine whether there's something to be done.
186 */
187#ifdef CONFIG_SMP
188typedef cpumask_var_t mayday_mask_t;
189#define mayday_test_and_set_cpu(cpu, mask) \
190 cpumask_test_and_set_cpu((cpu), (mask))
191#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
192#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
193#define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp))
194#define free_mayday_mask(mask) free_cpumask_var((mask))
195#else
196typedef unsigned long mayday_mask_t;
197#define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask))
198#define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask))
199#define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask))
200#define alloc_mayday_mask(maskp, gfp) true
201#define free_mayday_mask(mask) do { } while (0)
202#endif
203 225
204/* 226/*
205 * The externally visible workqueue abstraction is an array of 227 * The externally visible workqueue. It relays the issued work items to
206 * per-CPU workqueues: 228 * the appropriate worker_pool through its pool_workqueues.
207 */ 229 */
208struct workqueue_struct { 230struct workqueue_struct {
209 unsigned int flags; /* W: WQ_* flags */ 231 struct list_head pwqs; /* WR: all pwqs of this wq */
210 union { 232 struct list_head list; /* PL: list of all workqueues */
211 struct pool_workqueue __percpu *pcpu; 233
212 struct pool_workqueue *single; 234 struct mutex mutex; /* protects this wq */
213 unsigned long v; 235 int work_color; /* WQ: current work color */
214 } pool_wq; /* I: pwq's */ 236 int flush_color; /* WQ: current flush color */
215 struct list_head list; /* W: list of all workqueues */
216
217 struct mutex flush_mutex; /* protects wq flushing */
218 int work_color; /* F: current work color */
219 int flush_color; /* F: current flush color */
220 atomic_t nr_pwqs_to_flush; /* flush in progress */ 237 atomic_t nr_pwqs_to_flush; /* flush in progress */
221 struct wq_flusher *first_flusher; /* F: first flusher */ 238 struct wq_flusher *first_flusher; /* WQ: first flusher */
222 struct list_head flusher_queue; /* F: flush waiters */ 239 struct list_head flusher_queue; /* WQ: flush waiters */
223 struct list_head flusher_overflow; /* F: flush overflow list */ 240 struct list_head flusher_overflow; /* WQ: flush overflow list */
224 241
225 mayday_mask_t mayday_mask; /* cpus requesting rescue */ 242 struct list_head maydays; /* MD: pwqs requesting rescue */
226 struct worker *rescuer; /* I: rescue worker */ 243 struct worker *rescuer; /* I: rescue worker */
227 244
228 int nr_drainers; /* W: drain in progress */ 245 int nr_drainers; /* WQ: drain in progress */
229 int saved_max_active; /* W: saved pwq max_active */ 246 int saved_max_active; /* WQ: saved pwq max_active */
247
248 struct workqueue_attrs *unbound_attrs; /* WQ: only for unbound wqs */
249 struct pool_workqueue *dfl_pwq; /* WQ: only for unbound wqs */
250
251#ifdef CONFIG_SYSFS
252 struct wq_device *wq_dev; /* I: for sysfs interface */
253#endif
230#ifdef CONFIG_LOCKDEP 254#ifdef CONFIG_LOCKDEP
231 struct lockdep_map lockdep_map; 255 struct lockdep_map lockdep_map;
232#endif 256#endif
233 char name[]; /* I: workqueue name */ 257 char name[WQ_NAME_LEN]; /* I: workqueue name */
258
259 /* hot fields used during command issue, aligned to cacheline */
260 unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
261 struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
262 struct pool_workqueue __rcu *numa_pwq_tbl[]; /* WR: unbound pwqs indexed by node */
234}; 263};
235 264
265static struct kmem_cache *pwq_cache;
266
267static int wq_numa_tbl_len; /* highest possible NUMA node id + 1 */
268static cpumask_var_t *wq_numa_possible_cpumask;
269 /* possible CPUs of each node */
270
271static bool wq_disable_numa;
272module_param_named(disable_numa, wq_disable_numa, bool, 0444);
273
274static bool wq_numa_enabled; /* unbound NUMA affinity enabled */
275
276/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
277static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
278
279static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
280static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
281
282static LIST_HEAD(workqueues); /* PL: list of all workqueues */
283static bool workqueue_freezing; /* PL: have wqs started freezing? */
284
285/* the per-cpu worker pools */
286static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
287 cpu_worker_pools);
288
289static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
290
291/* PL: hash of all unbound pools keyed by pool->attrs */
292static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
293
294/* I: attributes used when instantiating standard unbound pools on demand */
295static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
296
236struct workqueue_struct *system_wq __read_mostly; 297struct workqueue_struct *system_wq __read_mostly;
237EXPORT_SYMBOL_GPL(system_wq); 298EXPORT_SYMBOL_GPL(system_wq);
238struct workqueue_struct *system_highpri_wq __read_mostly; 299struct workqueue_struct *system_highpri_wq __read_mostly;
@@ -244,64 +305,87 @@ EXPORT_SYMBOL_GPL(system_unbound_wq);
244struct workqueue_struct *system_freezable_wq __read_mostly; 305struct workqueue_struct *system_freezable_wq __read_mostly;
245EXPORT_SYMBOL_GPL(system_freezable_wq); 306EXPORT_SYMBOL_GPL(system_freezable_wq);
246 307
308static int worker_thread(void *__worker);
309static void copy_workqueue_attrs(struct workqueue_attrs *to,
310 const struct workqueue_attrs *from);
311
247#define CREATE_TRACE_POINTS 312#define CREATE_TRACE_POINTS
248#include <trace/events/workqueue.h> 313#include <trace/events/workqueue.h>
249 314
250#define for_each_std_worker_pool(pool, cpu) \ 315#define assert_rcu_or_pool_mutex() \
251 for ((pool) = &std_worker_pools(cpu)[0]; \ 316 rcu_lockdep_assert(rcu_read_lock_sched_held() || \
252 (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++) 317 lockdep_is_held(&wq_pool_mutex), \
318 "sched RCU or wq_pool_mutex should be held")
253 319
254#define for_each_busy_worker(worker, i, pool) \ 320#define assert_rcu_or_wq_mutex(wq) \
255 hash_for_each(pool->busy_hash, i, worker, hentry) 321 rcu_lockdep_assert(rcu_read_lock_sched_held() || \
322 lockdep_is_held(&wq->mutex), \
323 "sched RCU or wq->mutex should be held")
256 324
257static inline int __next_wq_cpu(int cpu, const struct cpumask *mask, 325#ifdef CONFIG_LOCKDEP
258 unsigned int sw) 326#define assert_manager_or_pool_lock(pool) \
259{ 327 WARN_ONCE(debug_locks && \
260 if (cpu < nr_cpu_ids) { 328 !lockdep_is_held(&(pool)->manager_mutex) && \
261 if (sw & 1) { 329 !lockdep_is_held(&(pool)->lock), \
262 cpu = cpumask_next(cpu, mask); 330 "pool->manager_mutex or ->lock should be held")
263 if (cpu < nr_cpu_ids) 331#else
264 return cpu; 332#define assert_manager_or_pool_lock(pool) do { } while (0)
265 } 333#endif
266 if (sw & 2)
267 return WORK_CPU_UNBOUND;
268 }
269 return WORK_CPU_END;
270}
271 334
272static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask, 335#define for_each_cpu_worker_pool(pool, cpu) \
273 struct workqueue_struct *wq) 336 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
274{ 337 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
275 return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2); 338 (pool)++)
276}
277 339
278/* 340/**
279 * CPU iterators 341 * for_each_pool - iterate through all worker_pools in the system
342 * @pool: iteration cursor
343 * @pi: integer used for iteration
280 * 344 *
281 * An extra cpu number is defined using an invalid cpu number 345 * This must be called either with wq_pool_mutex held or sched RCU read
282 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any 346 * locked. If the pool needs to be used beyond the locking in effect, the
283 * specific CPU. The following iterators are similar to for_each_*_cpu() 347 * caller is responsible for guaranteeing that the pool stays online.
284 * iterators but also considers the unbound CPU.
285 * 348 *
286 * for_each_wq_cpu() : possible CPUs + WORK_CPU_UNBOUND 349 * The if/else clause exists only for the lockdep assertion and can be
287 * for_each_online_wq_cpu() : online CPUs + WORK_CPU_UNBOUND 350 * ignored.
288 * for_each_pwq_cpu() : possible CPUs for bound workqueues,
289 * WORK_CPU_UNBOUND for unbound workqueues
290 */ 351 */
291#define for_each_wq_cpu(cpu) \ 352#define for_each_pool(pool, pi) \
292 for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, 3); \ 353 idr_for_each_entry(&worker_pool_idr, pool, pi) \
293 (cpu) < WORK_CPU_END; \ 354 if (({ assert_rcu_or_pool_mutex(); false; })) { } \
294 (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, 3)) 355 else
295 356
296#define for_each_online_wq_cpu(cpu) \ 357/**
297 for ((cpu) = __next_wq_cpu(-1, cpu_online_mask, 3); \ 358 * for_each_pool_worker - iterate through all workers of a worker_pool
298 (cpu) < WORK_CPU_END; \ 359 * @worker: iteration cursor
299 (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3)) 360 * @wi: integer used for iteration
361 * @pool: worker_pool to iterate workers of
362 *
363 * This must be called with either @pool->manager_mutex or ->lock held.
364 *
365 * The if/else clause exists only for the lockdep assertion and can be
366 * ignored.
367 */
368#define for_each_pool_worker(worker, wi, pool) \
369 idr_for_each_entry(&(pool)->worker_idr, (worker), (wi)) \
370 if (({ assert_manager_or_pool_lock((pool)); false; })) { } \
371 else
300 372
301#define for_each_pwq_cpu(cpu, wq) \ 373/**
302 for ((cpu) = __next_pwq_cpu(-1, cpu_possible_mask, (wq)); \ 374 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
303 (cpu) < WORK_CPU_END; \ 375 * @pwq: iteration cursor
304 (cpu) = __next_pwq_cpu((cpu), cpu_possible_mask, (wq))) 376 * @wq: the target workqueue
377 *
378 * This must be called either with wq->mutex held or sched RCU read locked.
379 * If the pwq needs to be used beyond the locking in effect, the caller is
380 * responsible for guaranteeing that the pwq stays online.
381 *
382 * The if/else clause exists only for the lockdep assertion and can be
383 * ignored.
384 */
385#define for_each_pwq(pwq, wq) \
386 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
387 if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
388 else
305 389
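Usage sketch for the new iterators (dump_wq_state() is a hypothetical debugging helper): a sched-RCU read section satisfies the lockdep assertions of both for_each_pool() and for_each_pwq(), and the trailing if/else in the macros is invisible at the call site.

	static void dump_wq_state(struct workqueue_struct *wq)
	{
		struct worker_pool *pool;
		struct pool_workqueue *pwq;
		int pi;

		rcu_read_lock_sched();
		for_each_pool(pool, pi)		/* PR: sched-RCU read side */
			pr_info("pool %d on cpu %d\n", pool->id, pool->cpu);
		for_each_pwq(pwq, wq)		/* WR: sched-RCU read side */
			pr_info("pwq with %d active works\n", pwq->nr_active);
		rcu_read_unlock_sched();
	}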
306#ifdef CONFIG_DEBUG_OBJECTS_WORK 390#ifdef CONFIG_DEBUG_OBJECTS_WORK
307 391
@@ -419,77 +503,35 @@ static inline void debug_work_activate(struct work_struct *work) { }
419static inline void debug_work_deactivate(struct work_struct *work) { } 503static inline void debug_work_deactivate(struct work_struct *work) { }
420#endif 504#endif
421 505
422/* Serializes the accesses to the list of workqueues. */
423static DEFINE_SPINLOCK(workqueue_lock);
424static LIST_HEAD(workqueues);
425static bool workqueue_freezing; /* W: have wqs started freezing? */
426
427/*
428 * The CPU and unbound standard worker pools. The unbound ones have
429 * POOL_DISASSOCIATED set, and their workers have WORKER_UNBOUND set.
430 */
431static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
432 cpu_std_worker_pools);
433static struct worker_pool unbound_std_worker_pools[NR_STD_WORKER_POOLS];
434
435/* idr of all pools */
436static DEFINE_MUTEX(worker_pool_idr_mutex);
437static DEFINE_IDR(worker_pool_idr);
438
439static int worker_thread(void *__worker);
440
441static struct worker_pool *std_worker_pools(int cpu)
442{
443 if (cpu != WORK_CPU_UNBOUND)
444 return per_cpu(cpu_std_worker_pools, cpu);
445 else
446 return unbound_std_worker_pools;
447}
448
449static int std_worker_pool_pri(struct worker_pool *pool)
450{
451 return pool - std_worker_pools(pool->cpu);
452}
453
454/* allocate ID and assign it to @pool */ 506/* allocate ID and assign it to @pool */
455static int worker_pool_assign_id(struct worker_pool *pool) 507static int worker_pool_assign_id(struct worker_pool *pool)
456{ 508{
457 int ret; 509 int ret;
458 510
459 mutex_lock(&worker_pool_idr_mutex); 511 lockdep_assert_held(&wq_pool_mutex);
512
460 ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL); 513 ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
461 if (ret >= 0) 514 if (ret >= 0) {
462 pool->id = ret; 515 pool->id = ret;
463 mutex_unlock(&worker_pool_idr_mutex); 516 return 0;
464 517 }
465 return ret < 0 ? ret : 0; 518 return ret;
466} 519}
467 520
468/* 521/**
469 * Lookup worker_pool by id. The idr currently is built during boot and 522 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
470 * never modified. Don't worry about locking for now. 523 * @wq: the target workqueue
524 * @node: the node ID
525 *
526 * This must be called either with wq->mutex held or sched RCU read locked.
527 * If the pwq needs to be used beyond the locking in effect, the caller is
528 * responsible for guaranteeing that the pwq stays online.
471 */ 529 */
472static struct worker_pool *worker_pool_by_id(int pool_id) 530static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
473{ 531 int node)
474 return idr_find(&worker_pool_idr, pool_id);
475}
476
477static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
478{
479 struct worker_pool *pools = std_worker_pools(cpu);
480
481 return &pools[highpri];
482}
483
484static struct pool_workqueue *get_pwq(unsigned int cpu,
485 struct workqueue_struct *wq)
486{ 532{
487 if (!(wq->flags & WQ_UNBOUND)) { 533 assert_rcu_or_wq_mutex(wq);
488 if (likely(cpu < nr_cpu_ids)) 534 return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
489 return per_cpu_ptr(wq->pool_wq.pcpu, cpu);
490 } else if (likely(cpu == WORK_CPU_UNBOUND))
491 return wq->pool_wq.single;
492 return NULL;
493} 535}
494 536
495static unsigned int work_color_to_flags(int color) 537static unsigned int work_color_to_flags(int color)
@@ -531,7 +573,7 @@ static int work_next_color(int color)
531static inline void set_work_data(struct work_struct *work, unsigned long data, 573static inline void set_work_data(struct work_struct *work, unsigned long data,
532 unsigned long flags) 574 unsigned long flags)
533{ 575{
534 BUG_ON(!work_pending(work)); 576 WARN_ON_ONCE(!work_pending(work));
535 atomic_long_set(&work->data, data | flags | work_static(work)); 577 atomic_long_set(&work->data, data | flags | work_static(work));
536} 578}
537 579
@@ -583,13 +625,23 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
583 * @work: the work item of interest 625 * @work: the work item of interest
584 * 626 *
585 * Return the worker_pool @work was last associated with. %NULL if none. 627 * Return the worker_pool @work was last associated with. %NULL if none.
628 *
629 * Pools are created and destroyed under wq_pool_mutex, and allow read
630 * access under sched-RCU read lock. As such, this function should be
631 * called under wq_pool_mutex or with preemption disabled.
632 *
633 * All fields of the returned pool are accessible as long as the above
634 * mentioned locking is in effect. If the returned pool needs to be used
635 * beyond the critical section, the caller is responsible for ensuring the
636 * returned pool is and stays online.
586 */ 637 */
587static struct worker_pool *get_work_pool(struct work_struct *work) 638static struct worker_pool *get_work_pool(struct work_struct *work)
588{ 639{
589 unsigned long data = atomic_long_read(&work->data); 640 unsigned long data = atomic_long_read(&work->data);
590 struct worker_pool *pool;
591 int pool_id; 641 int pool_id;
592 642
643 assert_rcu_or_pool_mutex();
644
593 if (data & WORK_STRUCT_PWQ) 645 if (data & WORK_STRUCT_PWQ)
594 return ((struct pool_workqueue *) 646 return ((struct pool_workqueue *)
595 (data & WORK_STRUCT_WQ_DATA_MASK))->pool; 647 (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
@@ -598,9 +650,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
598 if (pool_id == WORK_OFFQ_POOL_NONE) 650 if (pool_id == WORK_OFFQ_POOL_NONE)
599 return NULL; 651 return NULL;
600 652
601 pool = worker_pool_by_id(pool_id); 653 return idr_find(&worker_pool_idr, pool_id);
602 WARN_ON_ONCE(!pool);
603 return pool;
604} 654}
605 655
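A hedged sketch of a caller honoring the documented contract (the helper name is hypothetical): get_work_pool() may only be called under wq_pool_mutex or with preemption disabled, and the returned pool is valid only within that section.

	static bool work_has_known_pool(struct work_struct *work)
	{
		struct worker_pool *pool;
		bool known;

		rcu_read_lock_sched();	/* satisfies assert_rcu_or_pool_mutex() */
		pool = get_work_pool(work);
		known = pool != NULL;	/* only inspect @pool inside this section */
		rcu_read_unlock_sched();
		return known;
	}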
606/** 656/**
@@ -689,7 +739,7 @@ static bool need_to_manage_workers(struct worker_pool *pool)
689/* Do we have too many workers and should some go away? */ 739/* Do we have too many workers and should some go away? */
690static bool too_many_workers(struct worker_pool *pool) 740static bool too_many_workers(struct worker_pool *pool)
691{ 741{
692 bool managing = pool->flags & POOL_MANAGING_WORKERS; 742 bool managing = mutex_is_locked(&pool->manager_arb);
693 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 743 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
694 int nr_busy = pool->nr_workers - nr_idle; 744 int nr_busy = pool->nr_workers - nr_idle;
695 745
@@ -744,7 +794,7 @@ static void wake_up_worker(struct worker_pool *pool)
744 * CONTEXT: 794 * CONTEXT:
745 * spin_lock_irq(rq->lock) 795 * spin_lock_irq(rq->lock)
746 */ 796 */
747void wq_worker_waking_up(struct task_struct *task, unsigned int cpu) 797void wq_worker_waking_up(struct task_struct *task, int cpu)
748{ 798{
749 struct worker *worker = kthread_data(task); 799 struct worker *worker = kthread_data(task);
750 800
@@ -769,8 +819,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
769 * RETURNS: 819 * RETURNS:
770 * Worker task on @cpu to wake up, %NULL if none. 820 * Worker task on @cpu to wake up, %NULL if none.
771 */ 821 */
772struct task_struct *wq_worker_sleeping(struct task_struct *task, 822struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
773 unsigned int cpu)
774{ 823{
775 struct worker *worker = kthread_data(task), *to_wakeup = NULL; 824 struct worker *worker = kthread_data(task), *to_wakeup = NULL;
776 struct worker_pool *pool; 825 struct worker_pool *pool;
@@ -786,7 +835,8 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
786 pool = worker->pool; 835 pool = worker->pool;
787 836
788 /* this can only happen on the local cpu */ 837 /* this can only happen on the local cpu */
789 BUG_ON(cpu != raw_smp_processor_id()); 838 if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
839 return NULL;
790 840
791 /* 841 /*
792 * The counterpart of the following dec_and_test, implied mb, 842 * The counterpart of the following dec_and_test, implied mb,
@@ -891,13 +941,12 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
891 * recycled work item as currently executing and make it wait until the 941 * recycled work item as currently executing and make it wait until the
892 * current execution finishes, introducing an unwanted dependency. 942 * current execution finishes, introducing an unwanted dependency.
893 * 943 *
894 * This function checks the work item address, work function and workqueue 944 * This function checks the work item address and work function to avoid
895 * to avoid false positives. Note that this isn't complete as one may 945 * false positives. Note that this isn't complete as one may construct a
896 * construct a work function which can introduce dependency onto itself 946 * work function which can introduce dependency onto itself through a
897 * through a recycled work item. Well, if somebody wants to shoot oneself 947 * recycled work item. Well, if somebody wants to shoot oneself in the
898 * in the foot that badly, there's only so much we can do, and if such 948 * foot that badly, there's only so much we can do, and if such deadlock
899 * deadlock actually occurs, it should be easy to locate the culprit work 949 * actually occurs, it should be easy to locate the culprit work function.
900 * function.
901 * 950 *
902 * CONTEXT: 951 * CONTEXT:
903 * spin_lock_irq(pool->lock). 952 * spin_lock_irq(pool->lock).
@@ -961,6 +1010,64 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
961 *nextp = n; 1010 *nextp = n;
962} 1011}
963 1012
1013/**
1014 * get_pwq - get an extra reference on the specified pool_workqueue
1015 * @pwq: pool_workqueue to get
1016 *
1017 * Obtain an extra reference on @pwq. The caller should guarantee that
1018 * @pwq has positive refcnt and be holding the matching pool->lock.
1019 */
1020static void get_pwq(struct pool_workqueue *pwq)
1021{
1022 lockdep_assert_held(&pwq->pool->lock);
1023 WARN_ON_ONCE(pwq->refcnt <= 0);
1024 pwq->refcnt++;
1025}
1026
1027/**
1028 * put_pwq - put a pool_workqueue reference
1029 * @pwq: pool_workqueue to put
1030 *
1031 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its
1032 * destruction. The caller should be holding the matching pool->lock.
1033 */
1034static void put_pwq(struct pool_workqueue *pwq)
1035{
1036 lockdep_assert_held(&pwq->pool->lock);
1037 if (likely(--pwq->refcnt))
1038 return;
1039 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1040 return;
1041 /*
1042 * @pwq can't be released under pool->lock, bounce to
1043 * pwq_unbound_release_workfn(). This never recurses on the same
1044 * pool->lock as this path is taken only for unbound workqueues and
1045 * the release work item is scheduled on a per-cpu workqueue. To
1046 * avoid lockdep warning, unbound pool->locks are given lockdep
1047 * subclass of 1 in get_unbound_pool().
1048 */
1049 schedule_work(&pwq->unbound_release_work);
1050}
1051
1052/**
1053 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1054 * @pwq: pool_workqueue to put (can be %NULL)
1055 *
1056 * put_pwq() with locking. This function also allows %NULL @pwq.
1057 */
1058static void put_pwq_unlocked(struct pool_workqueue *pwq)
1059{
1060 if (pwq) {
1061 /*
1062 * As both pwqs and pools are sched-RCU protected, the
1063 * following lock operations are safe.
1064 */
1065 spin_lock_irq(&pwq->pool->lock);
1066 put_pwq(pwq);
1067 spin_unlock_irq(&pwq->pool->lock);
1068 }
1069}
1070
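The refcount discipline in one place (pin_pwq_and_work() is a hypothetical caller, shown only to illustrate the rules stated above): both get_pwq() and put_pwq() run under the matching pool->lock, and the final put of an unbound pwq is punted to system_wq rather than freed in place.

	static void pin_pwq_and_work(struct pool_workqueue *pwq)
	{
		spin_lock_irq(&pwq->pool->lock);
		get_pwq(pwq);		/* refcnt must already be positive */
		/* ... operate on @pwq while it is pinned ... */
		put_pwq(pwq);		/* may schedule unbound_release_work */
		spin_unlock_irq(&pwq->pool->lock);
	}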
964static void pwq_activate_delayed_work(struct work_struct *work) 1071static void pwq_activate_delayed_work(struct work_struct *work)
965{ 1072{
966 struct pool_workqueue *pwq = get_work_pwq(work); 1073 struct pool_workqueue *pwq = get_work_pwq(work);
@@ -992,9 +1099,9 @@ static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
992 */ 1099 */
993static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color) 1100static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
994{ 1101{
995 /* ignore uncolored works */ 1102 /* uncolored work items don't participate in flushing or nr_active */
996 if (color == WORK_NO_COLOR) 1103 if (color == WORK_NO_COLOR)
997 return; 1104 goto out_put;
998 1105
999 pwq->nr_in_flight[color]--; 1106 pwq->nr_in_flight[color]--;
1000 1107
@@ -1007,11 +1114,11 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1007 1114
1008 /* is flush in progress and are we at the flushing tip? */ 1115 /* is flush in progress and are we at the flushing tip? */
1009 if (likely(pwq->flush_color != color)) 1116 if (likely(pwq->flush_color != color))
1010 return; 1117 goto out_put;
1011 1118
1012 /* are there still in-flight works? */ 1119 /* are there still in-flight works? */
1013 if (pwq->nr_in_flight[color]) 1120 if (pwq->nr_in_flight[color])
1014 return; 1121 goto out_put;
1015 1122
1016 /* this pwq is done, clear flush_color */ 1123 /* this pwq is done, clear flush_color */
1017 pwq->flush_color = -1; 1124 pwq->flush_color = -1;
@@ -1022,6 +1129,8 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1022 */ 1129 */
1023 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) 1130 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1024 complete(&pwq->wq->first_flusher->done); 1131 complete(&pwq->wq->first_flusher->done);
1132out_put:
1133 put_pwq(pwq);
1025} 1134}
1026 1135
1027/** 1136/**
@@ -1144,11 +1253,12 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1144 /* we own @work, set data and link */ 1253 /* we own @work, set data and link */
1145 set_work_pwq(work, pwq, extra_flags); 1254 set_work_pwq(work, pwq, extra_flags);
1146 list_add_tail(&work->entry, head); 1255 list_add_tail(&work->entry, head);
1256 get_pwq(pwq);
1147 1257
1148 /* 1258 /*
1149 * Ensure either worker_sched_deactivated() sees the above 1259 * Ensure either wq_worker_sleeping() sees the above
1150 * list_add_tail() or we see zero nr_running to avoid workers 1260 * list_add_tail() or we see zero nr_running to avoid workers lying
1151 * lying around lazily while there are works to be processed. 1261 * around lazily while there are works to be processed.
1152 */ 1262 */
1153 smp_mb(); 1263 smp_mb();
1154 1264
@@ -1172,10 +1282,11 @@ static bool is_chained_work(struct workqueue_struct *wq)
1172 return worker && worker->current_pwq->wq == wq; 1282 return worker && worker->current_pwq->wq == wq;
1173} 1283}
1174 1284
1175static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, 1285static void __queue_work(int cpu, struct workqueue_struct *wq,
1176 struct work_struct *work) 1286 struct work_struct *work)
1177{ 1287{
1178 struct pool_workqueue *pwq; 1288 struct pool_workqueue *pwq;
1289 struct worker_pool *last_pool;
1179 struct list_head *worklist; 1290 struct list_head *worklist;
1180 unsigned int work_flags; 1291 unsigned int work_flags;
1181 unsigned int req_cpu = cpu; 1292 unsigned int req_cpu = cpu;
@@ -1191,48 +1302,62 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
1191 debug_work_activate(work); 1302 debug_work_activate(work);
1192 1303
1193 /* if dying, only works from the same workqueue are allowed */ 1304 /* if dying, only works from the same workqueue are allowed */
1194 if (unlikely(wq->flags & WQ_DRAINING) && 1305 if (unlikely(wq->flags & __WQ_DRAINING) &&
1195 WARN_ON_ONCE(!is_chained_work(wq))) 1306 WARN_ON_ONCE(!is_chained_work(wq)))
1196 return; 1307 return;
1308retry:
1309 if (req_cpu == WORK_CPU_UNBOUND)
1310 cpu = raw_smp_processor_id();
1197 1311
1198 /* determine the pwq to use */ 1312 /* pwq which will be used unless @work is executing elsewhere */
1199 if (!(wq->flags & WQ_UNBOUND)) { 1313 if (!(wq->flags & WQ_UNBOUND))
1200 struct worker_pool *last_pool; 1314 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1201 1315 else
1202 if (cpu == WORK_CPU_UNBOUND) 1316 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1203 cpu = raw_smp_processor_id();
1204
1205 /*
1206 * It's multi cpu. If @work was previously on a different
1207 * cpu, it might still be running there, in which case the
1208 * work needs to be queued on that cpu to guarantee
1209 * non-reentrancy.
1210 */
1211 pwq = get_pwq(cpu, wq);
1212 last_pool = get_work_pool(work);
1213 1317
1214 if (last_pool && last_pool != pwq->pool) { 1318 /*
1215 struct worker *worker; 1319 * If @work was previously on a different pool, it might still be
1320 * running there, in which case the work needs to be queued on that
1321 * pool to guarantee non-reentrancy.
1322 */
1323 last_pool = get_work_pool(work);
1324 if (last_pool && last_pool != pwq->pool) {
1325 struct worker *worker;
1216 1326
1217 spin_lock(&last_pool->lock); 1327 spin_lock(&last_pool->lock);
1218 1328
1219 worker = find_worker_executing_work(last_pool, work); 1329 worker = find_worker_executing_work(last_pool, work);
1220 1330
1221 if (worker && worker->current_pwq->wq == wq) { 1331 if (worker && worker->current_pwq->wq == wq) {
1222 pwq = get_pwq(last_pool->cpu, wq); 1332 pwq = worker->current_pwq;
1223 } else {
1224 /* meh... not running there, queue here */
1225 spin_unlock(&last_pool->lock);
1226 spin_lock(&pwq->pool->lock);
1227 }
1228 } else { 1333 } else {
1334 /* meh... not running there, queue here */
1335 spin_unlock(&last_pool->lock);
1229 spin_lock(&pwq->pool->lock); 1336 spin_lock(&pwq->pool->lock);
1230 } 1337 }
1231 } else { 1338 } else {
1232 pwq = get_pwq(WORK_CPU_UNBOUND, wq);
1233 spin_lock(&pwq->pool->lock); 1339 spin_lock(&pwq->pool->lock);
1234 } 1340 }
1235 1341
1342 /*
1343 * pwq is determined and locked. For unbound pools, we could have
1344 * raced with pwq release and it could already be dead. If its
1345 * refcnt is zero, repeat pwq selection. Note that pwqs never die
1346 * without another pwq replacing it in the numa_pwq_tbl or while
1347 * work items are executing on it, so the retrying is guaranteed to
1348 * make forward-progress.
1349 */
1350 if (unlikely(!pwq->refcnt)) {
1351 if (wq->flags & WQ_UNBOUND) {
1352 spin_unlock(&pwq->pool->lock);
1353 cpu_relax();
1354 goto retry;
1355 }
1356 /* oops */
1357 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1358 wq->name, cpu);
1359 }
1360
1236 /* pwq determined, queue */ 1361 /* pwq determined, queue */
1237 trace_workqueue_queue_work(req_cpu, pwq, work); 1362 trace_workqueue_queue_work(req_cpu, pwq, work);
1238 1363
@@ -1287,22 +1412,6 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
1287} 1412}
1288EXPORT_SYMBOL_GPL(queue_work_on); 1413EXPORT_SYMBOL_GPL(queue_work_on);
1289 1414
1290/**
1291 * queue_work - queue work on a workqueue
1292 * @wq: workqueue to use
1293 * @work: work to queue
1294 *
1295 * Returns %false if @work was already on a queue, %true otherwise.
1296 *
1297 * We queue the work to the CPU on which it was submitted, but if the CPU dies
1298 * it can be processed by another CPU.
1299 */
1300bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
1301{
1302 return queue_work_on(WORK_CPU_UNBOUND, wq, work);
1303}
1304EXPORT_SYMBOL_GPL(queue_work);
1305
1306void delayed_work_timer_fn(unsigned long __data) 1415void delayed_work_timer_fn(unsigned long __data)
1307{ 1416{
1308 struct delayed_work *dwork = (struct delayed_work *)__data; 1417 struct delayed_work *dwork = (struct delayed_work *)__data;
@@ -1378,21 +1487,6 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1378EXPORT_SYMBOL_GPL(queue_delayed_work_on); 1487EXPORT_SYMBOL_GPL(queue_delayed_work_on);
1379 1488
1380/** 1489/**
1381 * queue_delayed_work - queue work on a workqueue after delay
1382 * @wq: workqueue to use
1383 * @dwork: delayable work to queue
1384 * @delay: number of jiffies to wait before queueing
1385 *
1386 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
1387 */
1388bool queue_delayed_work(struct workqueue_struct *wq,
1389 struct delayed_work *dwork, unsigned long delay)
1390{
1391 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
1392}
1393EXPORT_SYMBOL_GPL(queue_delayed_work);
1394
1395/**
1396 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 1490 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1397 * @cpu: CPU number to execute work on 1491 * @cpu: CPU number to execute work on
1398 * @wq: workqueue to use 1492 * @wq: workqueue to use
@@ -1431,21 +1525,6 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1431EXPORT_SYMBOL_GPL(mod_delayed_work_on); 1525EXPORT_SYMBOL_GPL(mod_delayed_work_on);
1432 1526
1433/** 1527/**
1434 * mod_delayed_work - modify delay of or queue a delayed work
1435 * @wq: workqueue to use
1436 * @dwork: work to queue
1437 * @delay: number of jiffies to wait before queueing
1438 *
1439 * mod_delayed_work_on() on local CPU.
1440 */
1441bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
1442 unsigned long delay)
1443{
1444 return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
1445}
1446EXPORT_SYMBOL_GPL(mod_delayed_work);
1447
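The three wrappers deleted above (queue_work(), queue_delayed_work() and mod_delayed_work()) are pure one-liners over their _on() variants, so callers are unaffected as long as equivalent definitions survive elsewhere, presumably as static inlines in include/linux/workqueue.h in the same series. For reference, the deleted bodies reduce to:

	bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
	{
		return queue_work_on(WORK_CPU_UNBOUND, wq, work);
	}

	bool queue_delayed_work(struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay)
	{
		return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
	}

	bool mod_delayed_work(struct workqueue_struct *wq,
			      struct delayed_work *dwork, unsigned long delay)
	{
		return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
	}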
1448/**
1449 * worker_enter_idle - enter idle state 1528 * worker_enter_idle - enter idle state
1450 * @worker: worker which is entering idle state 1529 * @worker: worker which is entering idle state
1451 * 1530 *
@@ -1459,9 +1538,10 @@ static void worker_enter_idle(struct worker *worker)
1459{ 1538{
1460 struct worker_pool *pool = worker->pool; 1539 struct worker_pool *pool = worker->pool;
1461 1540
1462 BUG_ON(worker->flags & WORKER_IDLE); 1541 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1463 BUG_ON(!list_empty(&worker->entry) && 1542 WARN_ON_ONCE(!list_empty(&worker->entry) &&
1464 (worker->hentry.next || worker->hentry.pprev)); 1543 (worker->hentry.next || worker->hentry.pprev)))
1544 return;
1465 1545
1466 /* can't use worker_set_flags(), also called from start_worker() */ 1546 /* can't use worker_set_flags(), also called from start_worker() */
1467 worker->flags |= WORKER_IDLE; 1547 worker->flags |= WORKER_IDLE;
@@ -1498,22 +1578,25 @@ static void worker_leave_idle(struct worker *worker)
1498{ 1578{
1499 struct worker_pool *pool = worker->pool; 1579 struct worker_pool *pool = worker->pool;
1500 1580
1501 BUG_ON(!(worker->flags & WORKER_IDLE)); 1581 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1582 return;
1502 worker_clr_flags(worker, WORKER_IDLE); 1583 worker_clr_flags(worker, WORKER_IDLE);
1503 pool->nr_idle--; 1584 pool->nr_idle--;
1504 list_del_init(&worker->entry); 1585 list_del_init(&worker->entry);
1505} 1586}
1506 1587
1507/** 1588/**
1508 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool 1589 * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
1509 * @worker: self 1590 * @pool: target worker_pool
1591 *
1592 * Bind %current to the cpu of @pool if it is associated and lock @pool.
1510 * 1593 *
1511 * Works which are scheduled while the cpu is online must at least be 1594 * Works which are scheduled while the cpu is online must at least be
1512 * scheduled to a worker which is bound to the cpu so that if they are 1595 * scheduled to a worker which is bound to the cpu so that if they are
1513 * flushed from cpu callbacks while cpu is going down, they are 1596 * flushed from cpu callbacks while cpu is going down, they are
1514 * guaranteed to execute on the cpu. 1597 * guaranteed to execute on the cpu.
1515 * 1598 *
1516 * This function is to be used by rogue workers and rescuers to bind 1599 * This function is to be used by unbound workers and rescuers to bind
1517 * themselves to the target cpu and may race with cpu going down or 1600 * themselves to the target cpu and may race with cpu going down or
1518 * coming online. kthread_bind() can't be used because it may put the 1601 * coming online. kthread_bind() can't be used because it may put the
1519 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used 1602 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
@@ -1534,12 +1617,9 @@ static void worker_leave_idle(struct worker *worker)
1534 * %true if the associated pool is online (@worker is successfully 1617 * %true if the associated pool is online (@worker is successfully
1535 * bound), %false if offline. 1618 * bound), %false if offline.
1536 */ 1619 */
1537static bool worker_maybe_bind_and_lock(struct worker *worker) 1620static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
1538__acquires(&pool->lock) 1621__acquires(&pool->lock)
1539{ 1622{
1540 struct worker_pool *pool = worker->pool;
1541 struct task_struct *task = worker->task;
1542
1543 while (true) { 1623 while (true) {
1544 /* 1624 /*
1545 * The following call may fail, succeed or succeed 1625 * The following call may fail, succeed or succeed
@@ -1548,14 +1628,13 @@ __acquires(&pool->lock)
1548 * against POOL_DISASSOCIATED. 1628 * against POOL_DISASSOCIATED.
1549 */ 1629 */
1550 if (!(pool->flags & POOL_DISASSOCIATED)) 1630 if (!(pool->flags & POOL_DISASSOCIATED))
1551 set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu)); 1631 set_cpus_allowed_ptr(current, pool->attrs->cpumask);
1552 1632
1553 spin_lock_irq(&pool->lock); 1633 spin_lock_irq(&pool->lock);
1554 if (pool->flags & POOL_DISASSOCIATED) 1634 if (pool->flags & POOL_DISASSOCIATED)
1555 return false; 1635 return false;
1556 if (task_cpu(task) == pool->cpu && 1636 if (task_cpu(current) == pool->cpu &&
1557 cpumask_equal(&current->cpus_allowed, 1637 cpumask_equal(&current->cpus_allowed, pool->attrs->cpumask))
1558 get_cpu_mask(pool->cpu)))
1559 return true; 1638 return true;
1560 spin_unlock_irq(&pool->lock); 1639 spin_unlock_irq(&pool->lock);
1561 1640
@@ -1570,108 +1649,6 @@ __acquires(&pool->lock)
1570 } 1649 }
1571} 1650}
1572 1651
1573/*
1574 * Rebind an idle @worker to its CPU. worker_thread() will test
1575 * list_empty(@worker->entry) before leaving idle and call this function.
1576 */
1577static void idle_worker_rebind(struct worker *worker)
1578{
1579 /* CPU may go down again inbetween, clear UNBOUND only on success */
1580 if (worker_maybe_bind_and_lock(worker))
1581 worker_clr_flags(worker, WORKER_UNBOUND);
1582
1583 /* rebind complete, become available again */
1584 list_add(&worker->entry, &worker->pool->idle_list);
1585 spin_unlock_irq(&worker->pool->lock);
1586}
1587
1588/*
1589 * Function for @worker->rebind.work used to rebind unbound busy workers to
1590 * the associated cpu which is coming back online. This is scheduled by
1591 * cpu up but can race with other cpu hotplug operations and may be
1592 * executed twice without intervening cpu down.
1593 */
1594static void busy_worker_rebind_fn(struct work_struct *work)
1595{
1596 struct worker *worker = container_of(work, struct worker, rebind_work);
1597
1598 if (worker_maybe_bind_and_lock(worker))
1599 worker_clr_flags(worker, WORKER_UNBOUND);
1600
1601 spin_unlock_irq(&worker->pool->lock);
1602}
1603
1604/**
1605 * rebind_workers - rebind all workers of a pool to the associated CPU
1606 * @pool: pool of interest
1607 *
1608 * @pool->cpu is coming online. Rebind all workers to the CPU. Rebinding
1609 * is different for idle and busy ones.
1610 *
1611 * Idle ones will be removed from the idle_list and woken up. They will
1612 * add themselves back after completing rebind. This ensures that the
1613 * idle_list doesn't contain any unbound workers when re-bound busy workers
1614 * try to perform local wake-ups for concurrency management.
1615 *
1616 * Busy workers can rebind after they finish their current work items.
1617 * Queueing the rebind work item at the head of the scheduled list is
1618 * enough. Note that nr_running will be properly bumped as busy workers
1619 * rebind.
1620 *
1621 * On return, all non-manager workers are scheduled for rebind - see
1622 * manage_workers() for the manager special case. Any idle worker
1623 * including the manager will not appear on @idle_list until rebind is
1624 * complete, making local wake-ups safe.
1625 */
1626static void rebind_workers(struct worker_pool *pool)
1627{
1628 struct worker *worker, *n;
1629 int i;
1630
1631 lockdep_assert_held(&pool->assoc_mutex);
1632 lockdep_assert_held(&pool->lock);
1633
1634 /* dequeue and kick idle ones */
1635 list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
1636 /*
1637 * idle workers should be off @pool->idle_list until rebind
1638 * is complete to avoid receiving premature local wake-ups.
1639 */
1640 list_del_init(&worker->entry);
1641
1642 /*
1643 * worker_thread() will see the above dequeuing and call
1644 * idle_worker_rebind().
1645 */
1646 wake_up_process(worker->task);
1647 }
1648
1649 /* rebind busy workers */
1650 for_each_busy_worker(worker, i, pool) {
1651 struct work_struct *rebind_work = &worker->rebind_work;
1652 struct workqueue_struct *wq;
1653
1654 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
1655 work_data_bits(rebind_work)))
1656 continue;
1657
1658 debug_work_activate(rebind_work);
1659
1660 /*
1661 * wq doesn't really matter but let's keep @worker->pool
1662 * and @pwq->pool consistent for sanity.
1663 */
1664 if (std_worker_pool_pri(worker->pool))
1665 wq = system_highpri_wq;
1666 else
1667 wq = system_wq;
1668
1669 insert_work(get_pwq(pool->cpu, wq), rebind_work,
1670 worker->scheduled.next,
1671 work_color_to_flags(WORK_NO_COLOR));
1672 }
1673}
1674
1675static struct worker *alloc_worker(void) 1652static struct worker *alloc_worker(void)
1676{ 1653{
1677 struct worker *worker; 1654 struct worker *worker;
@@ -1680,7 +1657,6 @@ static struct worker *alloc_worker(void)
1680 if (worker) { 1657 if (worker) {
1681 INIT_LIST_HEAD(&worker->entry); 1658 INIT_LIST_HEAD(&worker->entry);
1682 INIT_LIST_HEAD(&worker->scheduled); 1659 INIT_LIST_HEAD(&worker->scheduled);
1683 INIT_WORK(&worker->rebind_work, busy_worker_rebind_fn);
1684 /* on creation a worker is in !idle && prep state */ 1660 /* on creation a worker is in !idle && prep state */
1685 worker->flags = WORKER_PREP; 1661 worker->flags = WORKER_PREP;
1686 } 1662 }
@@ -1703,18 +1679,25 @@ static struct worker *alloc_worker(void)
1703 */ 1679 */
1704static struct worker *create_worker(struct worker_pool *pool) 1680static struct worker *create_worker(struct worker_pool *pool)
1705{ 1681{
1706 const char *pri = std_worker_pool_pri(pool) ? "H" : "";
1707 struct worker *worker = NULL; 1682 struct worker *worker = NULL;
1708 int id = -1; 1683 int id = -1;
1684 char id_buf[16];
1709 1685
1686 lockdep_assert_held(&pool->manager_mutex);
1687
1688 /*
1689 * ID is needed to determine kthread name. Allocate ID first
1690 * without installing the pointer.
1691 */
1692 idr_preload(GFP_KERNEL);
1710 spin_lock_irq(&pool->lock); 1693 spin_lock_irq(&pool->lock);
1711 while (ida_get_new(&pool->worker_ida, &id)) { 1694
1712 spin_unlock_irq(&pool->lock); 1695 id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_NOWAIT);
1713 if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL)) 1696
1714 goto fail;
1715 spin_lock_irq(&pool->lock);
1716 }
1717 spin_unlock_irq(&pool->lock); 1697 spin_unlock_irq(&pool->lock);
1698 idr_preload_end();
1699 if (id < 0)
1700 goto fail;
1718 1701
1719 worker = alloc_worker(); 1702 worker = alloc_worker();
1720 if (!worker) 1703 if (!worker)
@@ -1723,40 +1706,46 @@ static struct worker *create_worker(struct worker_pool *pool)
1723 worker->pool = pool; 1706 worker->pool = pool;
1724 worker->id = id; 1707 worker->id = id;
1725 1708
1726 if (pool->cpu != WORK_CPU_UNBOUND) 1709 if (pool->cpu >= 0)
1727 worker->task = kthread_create_on_node(worker_thread, 1710 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1728 worker, cpu_to_node(pool->cpu), 1711 pool->attrs->nice < 0 ? "H" : "");
1729 "kworker/%u:%d%s", pool->cpu, id, pri);
1730 else 1712 else
1731 worker->task = kthread_create(worker_thread, worker, 1713 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1732 "kworker/u:%d%s", id, pri); 1714
1715 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1716 "kworker/%s", id_buf);
1733 if (IS_ERR(worker->task)) 1717 if (IS_ERR(worker->task))
1734 goto fail; 1718 goto fail;
1735 1719
1736 if (std_worker_pool_pri(pool)) 1720 /*
1737 set_user_nice(worker->task, HIGHPRI_NICE_LEVEL); 1721 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1722 * online CPUs. It'll be re-applied when any of the CPUs come up.
1723 */
1724 set_user_nice(worker->task, pool->attrs->nice);
1725 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1726
1727 /* prevent userland from meddling with cpumask of workqueue workers */
1728 worker->task->flags |= PF_NO_SETAFFINITY;
1738 1729
1739 /* 1730 /*
1740 * Determine CPU binding of the new worker depending on 1731 * The caller is responsible for ensuring %POOL_DISASSOCIATED
1741 * %POOL_DISASSOCIATED. The caller is responsible for ensuring the 1732 * remains stable across this function. See the comments above the
1742 * flag remains stable across this function. See the comments 1733 * flag definition for details.
1743 * above the flag definition for details.
1744 *
1745 * As an unbound worker may later become a regular one if CPU comes
1746 * online, make sure every worker has %PF_THREAD_BOUND set.
1747 */ 1734 */
1748 if (!(pool->flags & POOL_DISASSOCIATED)) { 1735 if (pool->flags & POOL_DISASSOCIATED)
1749 kthread_bind(worker->task, pool->cpu);
1750 } else {
1751 worker->task->flags |= PF_THREAD_BOUND;
1752 worker->flags |= WORKER_UNBOUND; 1736 worker->flags |= WORKER_UNBOUND;
1753 } 1737
1738 /* successful, commit the pointer to idr */
1739 spin_lock_irq(&pool->lock);
1740 idr_replace(&pool->worker_idr, worker, worker->id);
1741 spin_unlock_irq(&pool->lock);
1754 1742
1755 return worker; 1743 return worker;
1744
1756fail: 1745fail:
1757 if (id >= 0) { 1746 if (id >= 0) {
1758 spin_lock_irq(&pool->lock); 1747 spin_lock_irq(&pool->lock);
1759 ida_remove(&pool->worker_ida, id); 1748 idr_remove(&pool->worker_idr, id);
1760 spin_unlock_irq(&pool->lock); 1749 spin_unlock_irq(&pool->lock);
1761 } 1750 }
1762 kfree(worker); 1751 kfree(worker);
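The ID handling above is the stock reserve-then-publish idr pattern; in isolation (names taken from the hunk above, error path elided):

	int id;

	idr_preload(GFP_KERNEL);		/* preallocate outside the lock */
	spin_lock_irq(&pool->lock);
	id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_NOWAIT); /* reserve */
	spin_unlock_irq(&pool->lock);
	idr_preload_end();

	/* ... allocate and set up the worker, idr_remove() on failure ... */

	spin_lock_irq(&pool->lock);
	idr_replace(&pool->worker_idr, worker, id);	/* publish when ready */
	spin_unlock_irq(&pool->lock);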
@@ -1781,6 +1770,30 @@ static void start_worker(struct worker *worker)
1781} 1770}
1782 1771
1783/** 1772/**
1773 * create_and_start_worker - create and start a worker for a pool
1774 * @pool: the target pool
1775 *
1776 * Grab the managership of @pool and create and start a new worker for it.
1777 */
1778static int create_and_start_worker(struct worker_pool *pool)
1779{
1780 struct worker *worker;
1781
1782 mutex_lock(&pool->manager_mutex);
1783
1784 worker = create_worker(pool);
1785 if (worker) {
1786 spin_lock_irq(&pool->lock);
1787 start_worker(worker);
1788 spin_unlock_irq(&pool->lock);
1789 }
1790
1791 mutex_unlock(&pool->manager_mutex);
1792
1793 return worker ? 0 : -ENOMEM;
1794}
1795
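A plausible call site for the new helper (hypothetical; the actual init and hotplug paths are not part of this hunk):

	for_each_online_cpu(cpu) {
		struct worker_pool *pool;

		for_each_cpu_worker_pool(pool, cpu) {
			if (create_and_start_worker(pool) < 0)
				return -ENOMEM;	/* pool has no initial worker */
		}
	}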
1796/**
1784 * destroy_worker - destroy a workqueue worker 1797 * destroy_worker - destroy a workqueue worker
1785 * @worker: worker to be destroyed 1798 * @worker: worker to be destroyed
1786 * 1799 *
@@ -1792,11 +1805,14 @@ static void start_worker(struct worker *worker)
1792static void destroy_worker(struct worker *worker) 1805static void destroy_worker(struct worker *worker)
1793{ 1806{
1794 struct worker_pool *pool = worker->pool; 1807 struct worker_pool *pool = worker->pool;
1795 int id = worker->id; 1808
1809 lockdep_assert_held(&pool->manager_mutex);
1810 lockdep_assert_held(&pool->lock);
1796 1811
1797 /* sanity check frenzy */ 1812 /* sanity check frenzy */
1798 BUG_ON(worker->current_work); 1813 if (WARN_ON(worker->current_work) ||
1799 BUG_ON(!list_empty(&worker->scheduled)); 1814 WARN_ON(!list_empty(&worker->scheduled)))
1815 return;
1800 1816
1801 if (worker->flags & WORKER_STARTED) 1817 if (worker->flags & WORKER_STARTED)
1802 pool->nr_workers--; 1818 pool->nr_workers--;
@@ -1806,13 +1822,14 @@ static void destroy_worker(struct worker *worker)
1806 list_del_init(&worker->entry); 1822 list_del_init(&worker->entry);
1807 worker->flags |= WORKER_DIE; 1823 worker->flags |= WORKER_DIE;
1808 1824
1825 idr_remove(&pool->worker_idr, worker->id);
1826
1809 spin_unlock_irq(&pool->lock); 1827 spin_unlock_irq(&pool->lock);
1810 1828
1811 kthread_stop(worker->task); 1829 kthread_stop(worker->task);
1812 kfree(worker); 1830 kfree(worker);
1813 1831
1814 spin_lock_irq(&pool->lock); 1832 spin_lock_irq(&pool->lock);
1815 ida_remove(&pool->worker_ida, id);
1816} 1833}
1817 1834
1818static void idle_worker_timeout(unsigned long __pool) 1835static void idle_worker_timeout(unsigned long __pool)
@@ -1841,23 +1858,21 @@ static void idle_worker_timeout(unsigned long __pool)
1841 spin_unlock_irq(&pool->lock); 1858 spin_unlock_irq(&pool->lock);
1842} 1859}
1843 1860
1844static bool send_mayday(struct work_struct *work) 1861static void send_mayday(struct work_struct *work)
1845{ 1862{
1846 struct pool_workqueue *pwq = get_work_pwq(work); 1863 struct pool_workqueue *pwq = get_work_pwq(work);
1847 struct workqueue_struct *wq = pwq->wq; 1864 struct workqueue_struct *wq = pwq->wq;
1848 unsigned int cpu;
1849 1865
1850 if (!(wq->flags & WQ_RESCUER)) 1866 lockdep_assert_held(&wq_mayday_lock);
1851 return false; 1867
1868 if (!wq->rescuer)
1869 return;
1852 1870
1853 /* mayday mayday mayday */ 1871 /* mayday mayday mayday */
1854 cpu = pwq->pool->cpu; 1872 if (list_empty(&pwq->mayday_node)) {
1855 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ 1873 list_add_tail(&pwq->mayday_node, &wq->maydays);
1856 if (cpu == WORK_CPU_UNBOUND)
1857 cpu = 0;
1858 if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1859 wake_up_process(wq->rescuer->task); 1874 wake_up_process(wq->rescuer->task);
1860 return true; 1875 }
1861} 1876}
1862 1877
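On the consuming side, a rescuer would drain wq->maydays roughly as follows (simplified sketch; the matching rescuer_thread() changes are not shown in this hunk):

	spin_lock_irq(&wq_mayday_lock);
	while (!list_empty(&wq->maydays)) {
		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
					struct pool_workqueue, mayday_node);

		list_del_init(&pwq->mayday_node);	/* allows re-arming */
		spin_unlock_irq(&wq_mayday_lock);
		/* ... migrate to pwq->pool and process its work items ... */
		spin_lock_irq(&wq_mayday_lock);
	}
	spin_unlock_irq(&wq_mayday_lock);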
1863static void pool_mayday_timeout(unsigned long __pool) 1878static void pool_mayday_timeout(unsigned long __pool)
@@ -1865,7 +1880,8 @@ static void pool_mayday_timeout(unsigned long __pool)
1865 struct worker_pool *pool = (void *)__pool; 1880 struct worker_pool *pool = (void *)__pool;
1866 struct work_struct *work; 1881 struct work_struct *work;
1867 1882
1868 spin_lock_irq(&pool->lock); 1883 spin_lock_irq(&wq_mayday_lock); /* for wq->maydays */
1884 spin_lock(&pool->lock);
1869 1885
1870 if (need_to_create_worker(pool)) { 1886 if (need_to_create_worker(pool)) {
1871 /* 1887 /*
@@ -1878,7 +1894,8 @@ static void pool_mayday_timeout(unsigned long __pool)
1878 send_mayday(work); 1894 send_mayday(work);
1879 } 1895 }
1880 1896
1881 spin_unlock_irq(&pool->lock); 1897 spin_unlock(&pool->lock);
1898 spin_unlock_irq(&wq_mayday_lock);
1882 1899
1883 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 1900 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1884} 1901}
@@ -1893,8 +1910,8 @@ static void pool_mayday_timeout(unsigned long __pool)
1893 * sent to all rescuers with works scheduled on @pool to resolve 1910 * sent to all rescuers with works scheduled on @pool to resolve
1894 * possible allocation deadlock. 1911 * possible allocation deadlock.
1895 * 1912 *
1896 * On return, need_to_create_worker() is guaranteed to be false and 1913 * On return, need_to_create_worker() is guaranteed to be %false and
1897 * may_start_working() true. 1914 * may_start_working() %true.
1898 * 1915 *
1899 * LOCKING: 1916 * LOCKING:
1900 * spin_lock_irq(pool->lock) which may be released and regrabbed 1917 * spin_lock_irq(pool->lock) which may be released and regrabbed
@@ -1902,7 +1919,7 @@ static void pool_mayday_timeout(unsigned long __pool)
1902 * manager. 1919 * manager.
1903 * 1920 *
1904 * RETURNS: 1921 * RETURNS:
1905 * false if no action was taken and pool->lock stayed locked, true 1922 * %false if no action was taken and pool->lock stayed locked, %true
1906 * otherwise. 1923 * otherwise.
1907 */ 1924 */
1908static bool maybe_create_worker(struct worker_pool *pool) 1925static bool maybe_create_worker(struct worker_pool *pool)
@@ -1925,7 +1942,8 @@ restart:
1925 del_timer_sync(&pool->mayday_timer); 1942 del_timer_sync(&pool->mayday_timer);
1926 spin_lock_irq(&pool->lock); 1943 spin_lock_irq(&pool->lock);
1927 start_worker(worker); 1944 start_worker(worker);
1928 BUG_ON(need_to_create_worker(pool)); 1945 if (WARN_ON_ONCE(need_to_create_worker(pool)))
1946 goto restart;
1929 return true; 1947 return true;
1930 } 1948 }
1931 1949
@@ -1958,7 +1976,7 @@ restart:
1958 * multiple times. Called only from manager. 1976 * multiple times. Called only from manager.
1959 * 1977 *
1960 * RETURNS: 1978 * RETURNS:
1961 * false if no action was taken and pool->lock stayed locked, true 1979 * %false if no action was taken and pool->lock stayed locked, %true
1962 * otherwise. 1980 * otherwise.
1963 */ 1981 */
1964static bool maybe_destroy_workers(struct worker_pool *pool) 1982static bool maybe_destroy_workers(struct worker_pool *pool)
@@ -2009,42 +2027,37 @@ static bool manage_workers(struct worker *worker)
2009 struct worker_pool *pool = worker->pool; 2027 struct worker_pool *pool = worker->pool;
2010 bool ret = false; 2028 bool ret = false;
2011 2029
2012 if (pool->flags & POOL_MANAGING_WORKERS) 2030 /*
2031 * Managership is governed by two mutexes - manager_arb and
2032 * manager_mutex. manager_arb handles arbitration of manager role.
2033 * Anyone who successfully grabs manager_arb wins the arbitration
2034 * and becomes the manager. mutex_trylock() on pool->manager_arb
2035 * failure while holding pool->lock reliably indicates that someone
2036 * else is managing the pool and the worker which failed trylock
2037 * can proceed to executing work items. This means that anyone
2038 * grabbing manager_arb is responsible for actually performing
2039 * manager duties. If manager_arb is grabbed and released without
2040 * actual management, the pool may stall indefinitely.
2041 *
2042 * manager_mutex is used for exclusion of actual management
2043 * operations. The holder of manager_mutex can be sure that none
 2044 * of the management operations, including creation and destruction of
 2045 * workers, will take place until the mutex is released. Because
 2046 * manager_mutex doesn't interfere with manager role arbitration,
 2047 * it is guaranteed that the pool's management, while it may be
2048 * delayed, won't be disturbed by someone else grabbing
2049 * manager_mutex.
2050 */
2051 if (!mutex_trylock(&pool->manager_arb))
2013 return ret; 2052 return ret;
2014 2053
2015 pool->flags |= POOL_MANAGING_WORKERS;
2016
2017 /* 2054 /*
2018 * To simplify both worker management and CPU hotplug, hold off 2055 * With manager arbitration won, manager_mutex would be free in
2019 * management while hotplug is in progress. CPU hotplug path can't 2056 * most cases. trylock first without dropping @pool->lock.
2020 * grab %POOL_MANAGING_WORKERS to achieve this because that can
2021 * lead to idle worker depletion (all become busy thinking someone
2022 * else is managing) which in turn can result in deadlock under
2023 * extreme circumstances. Use @pool->assoc_mutex to synchronize
2024 * manager against CPU hotplug.
2025 *
2026 * assoc_mutex would always be free unless CPU hotplug is in
2027 * progress. trylock first without dropping @pool->lock.
2028 */ 2057 */
2029 if (unlikely(!mutex_trylock(&pool->assoc_mutex))) { 2058 if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
2030 spin_unlock_irq(&pool->lock); 2059 spin_unlock_irq(&pool->lock);
2031 mutex_lock(&pool->assoc_mutex); 2060 mutex_lock(&pool->manager_mutex);
2032 /*
2033 * CPU hotplug could have happened while we were waiting
2034 * for assoc_mutex. Hotplug itself can't handle us
2035 * because manager isn't either on idle or busy list, and
2036 * @pool's state and ours could have deviated.
2037 *
2038 * As hotplug is now excluded via assoc_mutex, we can
2039 * simply try to bind. It will succeed or fail depending
2040 * on @pool's current state. Try it and adjust
2041 * %WORKER_UNBOUND accordingly.
2042 */
2043 if (worker_maybe_bind_and_lock(worker))
2044 worker->flags &= ~WORKER_UNBOUND;
2045 else
2046 worker->flags |= WORKER_UNBOUND;
2047
2048 ret = true; 2061 ret = true;
2049 } 2062 }
2050 2063
@@ -2057,8 +2070,8 @@ static bool manage_workers(struct worker *worker)
2057 ret |= maybe_destroy_workers(pool); 2070 ret |= maybe_destroy_workers(pool);
2058 ret |= maybe_create_worker(pool); 2071 ret |= maybe_create_worker(pool);
2059 2072
2060 pool->flags &= ~POOL_MANAGING_WORKERS; 2073 mutex_unlock(&pool->manager_mutex);
2061 mutex_unlock(&pool->assoc_mutex); 2074 mutex_unlock(&pool->manager_arb);
2062 return ret; 2075 return ret;
2063} 2076}
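The arbitration pattern described in the comment above is easy to model outside the kernel. Below is a minimal userspace sketch (hypothetical names, pthreads standing in for kernel mutexes): the thread that wins the trylock must do the management work, and a thread that loses may safely go execute work items, because the winner owns the manager duties.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t manager_arb = PTHREAD_MUTEX_INITIALIZER;

/* Model of the manage_workers() entry: trylock failure means someone
 * else already holds the manager role, so the caller can bail out and
 * process work items instead. */
static bool try_manage(void (*do_manage)(void))
{
	if (pthread_mutex_trylock(&manager_arb) != 0)
		return false;	/* another thread is the manager */
	do_manage();		/* e.g. create or destroy workers */
	pthread_mutex_unlock(&manager_arb);
	return true;
}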
2064 2077
@@ -2212,11 +2225,11 @@ static void process_scheduled_works(struct worker *worker)
2212 * worker_thread - the worker thread function 2225 * worker_thread - the worker thread function
2213 * @__worker: self 2226 * @__worker: self
2214 * 2227 *
2215 * The worker thread function. There are NR_CPU_WORKER_POOLS dynamic pools 2228 * The worker thread function. All workers belong to a worker_pool -
2216 * of these per each cpu. These workers process all works regardless of 2229 * either a per-cpu one or dynamic unbound one. These workers process all
2217 * their specific target workqueue. The only exception is works which 2230 * work items regardless of their specific target workqueue. The only
2218 * belong to workqueues with a rescuer which will be explained in 2231 * exception is work items which belong to workqueues with a rescuer which
2219 * rescuer_thread(). 2232 * will be explained in rescuer_thread().
2220 */ 2233 */
2221static int worker_thread(void *__worker) 2234static int worker_thread(void *__worker)
2222{ 2235{
@@ -2228,19 +2241,12 @@ static int worker_thread(void *__worker)
2228woke_up: 2241woke_up:
2229 spin_lock_irq(&pool->lock); 2242 spin_lock_irq(&pool->lock);
2230 2243
2231 /* we are off idle list if destruction or rebind is requested */ 2244 /* am I supposed to die? */
2232 if (unlikely(list_empty(&worker->entry))) { 2245 if (unlikely(worker->flags & WORKER_DIE)) {
2233 spin_unlock_irq(&pool->lock); 2246 spin_unlock_irq(&pool->lock);
2234 2247 WARN_ON_ONCE(!list_empty(&worker->entry));
2235 /* if DIE is set, destruction is requested */ 2248 worker->task->flags &= ~PF_WQ_WORKER;
2236 if (worker->flags & WORKER_DIE) { 2249 return 0;
2237 worker->task->flags &= ~PF_WQ_WORKER;
2238 return 0;
2239 }
2240
2241 /* otherwise, rebind */
2242 idle_worker_rebind(worker);
2243 goto woke_up;
2244 } 2250 }
2245 2251
2246 worker_leave_idle(worker); 2252 worker_leave_idle(worker);
@@ -2258,14 +2264,16 @@ recheck:
2258 * preparing to process a work or actually processing it. 2264 * preparing to process a work or actually processing it.
2259 * Make sure nobody diddled with it while I was sleeping. 2265 * Make sure nobody diddled with it while I was sleeping.
2260 */ 2266 */
2261 BUG_ON(!list_empty(&worker->scheduled)); 2267 WARN_ON_ONCE(!list_empty(&worker->scheduled));
2262 2268
2263 /* 2269 /*
2264 * When control reaches this point, we're guaranteed to have 2270 * Finish PREP stage. We're guaranteed to have at least one idle
2265 * at least one idle worker or that someone else has already 2271 * worker or that someone else has already assumed the manager
2266 * assumed the manager role. 2272 * role. This is where @worker starts participating in concurrency
 2273 * management if applicable; concurrency management is restored
 2274 * after a rebind. See rebind_workers() for details.
2267 */ 2275 */
2268 worker_clr_flags(worker, WORKER_PREP); 2276 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2269 2277
2270 do { 2278 do {
2271 struct work_struct *work = 2279 struct work_struct *work =
@@ -2307,7 +2315,7 @@ sleep:
2307 * @__rescuer: self 2315 * @__rescuer: self
2308 * 2316 *
2309 * Workqueue rescuer thread function. There's one rescuer for each 2317 * Workqueue rescuer thread function. There's one rescuer for each
2310 * workqueue which has WQ_RESCUER set. 2318 * workqueue which has WQ_MEM_RECLAIM set.
2311 * 2319 *
2312 * Regular work processing on a pool may block trying to create a new 2320 * Regular work processing on a pool may block trying to create a new
2313 * worker which uses GFP_KERNEL allocation which has slight chance of 2321 * worker which uses GFP_KERNEL allocation which has slight chance of
@@ -2326,8 +2334,6 @@ static int rescuer_thread(void *__rescuer)
2326 struct worker *rescuer = __rescuer; 2334 struct worker *rescuer = __rescuer;
2327 struct workqueue_struct *wq = rescuer->rescue_wq; 2335 struct workqueue_struct *wq = rescuer->rescue_wq;
2328 struct list_head *scheduled = &rescuer->scheduled; 2336 struct list_head *scheduled = &rescuer->scheduled;
2329 bool is_unbound = wq->flags & WQ_UNBOUND;
2330 unsigned int cpu;
2331 2337
2332 set_user_nice(current, RESCUER_NICE_LEVEL); 2338 set_user_nice(current, RESCUER_NICE_LEVEL);
2333 2339
@@ -2345,28 +2351,29 @@ repeat:
2345 return 0; 2351 return 0;
2346 } 2352 }
2347 2353
2348 /* 2354 /* see whether any pwq is asking for help */
2349 * See whether any cpu is asking for help. Unbounded 2355 spin_lock_irq(&wq_mayday_lock);
2350 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND. 2356
2351 */ 2357 while (!list_empty(&wq->maydays)) {
2352 for_each_mayday_cpu(cpu, wq->mayday_mask) { 2358 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2353 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; 2359 struct pool_workqueue, mayday_node);
2354 struct pool_workqueue *pwq = get_pwq(tcpu, wq);
2355 struct worker_pool *pool = pwq->pool; 2360 struct worker_pool *pool = pwq->pool;
2356 struct work_struct *work, *n; 2361 struct work_struct *work, *n;
2357 2362
2358 __set_current_state(TASK_RUNNING); 2363 __set_current_state(TASK_RUNNING);
2359 mayday_clear_cpu(cpu, wq->mayday_mask); 2364 list_del_init(&pwq->mayday_node);
2365
2366 spin_unlock_irq(&wq_mayday_lock);
2360 2367
2361 /* migrate to the target cpu if possible */ 2368 /* migrate to the target cpu if possible */
2369 worker_maybe_bind_and_lock(pool);
2362 rescuer->pool = pool; 2370 rescuer->pool = pool;
2363 worker_maybe_bind_and_lock(rescuer);
2364 2371
2365 /* 2372 /*
2366 * Slurp in all works issued via this workqueue and 2373 * Slurp in all works issued via this workqueue and
2367 * process'em. 2374 * process'em.
2368 */ 2375 */
2369 BUG_ON(!list_empty(&rescuer->scheduled)); 2376 WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
2370 list_for_each_entry_safe(work, n, &pool->worklist, entry) 2377 list_for_each_entry_safe(work, n, &pool->worklist, entry)
2371 if (get_work_pwq(work) == pwq) 2378 if (get_work_pwq(work) == pwq)
2372 move_linked_works(work, scheduled, &n); 2379 move_linked_works(work, scheduled, &n);
@@ -2381,9 +2388,13 @@ repeat:
2381 if (keep_working(pool)) 2388 if (keep_working(pool))
2382 wake_up_worker(pool); 2389 wake_up_worker(pool);
2383 2390
2384 spin_unlock_irq(&pool->lock); 2391 rescuer->pool = NULL;
2392 spin_unlock(&pool->lock);
2393 spin_lock(&wq_mayday_lock);
2385 } 2394 }
2386 2395
2396 spin_unlock_irq(&wq_mayday_lock);
2397
2387 /* rescuers should never participate in concurrency management */ 2398 /* rescuers should never participate in concurrency management */
2388 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 2399 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2389 schedule(); 2400 schedule();
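For reference, the rescuer's new main loop boils down to the following pattern (a condensed restatement of the hunk above, not a drop-in replacement): pop one pwq at a time from wq->maydays under wq_mayday_lock, drop the lock while servicing it, then retake the lock for the next entry. list_del_init() is what allows the mayday timer to re-arm the entry later.

	spin_lock_irq(&wq_mayday_lock);
	while (!list_empty(&wq->maydays)) {
		struct pool_workqueue *pwq =
			list_first_entry(&wq->maydays,
					 struct pool_workqueue, mayday_node);

		list_del_init(&pwq->mayday_node);	/* re-armable later */
		spin_unlock_irq(&wq_mayday_lock);

		/* ... bind to pwq->pool, slurp and process its work items ... */

		spin_lock_irq(&wq_mayday_lock);
	}
	spin_unlock_irq(&wq_mayday_lock);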
@@ -2487,7 +2498,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
2487 * advanced to @work_color. 2498 * advanced to @work_color.
2488 * 2499 *
2489 * CONTEXT: 2500 * CONTEXT:
2490 * mutex_lock(wq->flush_mutex). 2501 * mutex_lock(wq->mutex).
2491 * 2502 *
2492 * RETURNS: 2503 * RETURNS:
2493 * %true if @flush_color >= 0 and there's something to flush. %false 2504 * %true if @flush_color >= 0 and there's something to flush. %false
@@ -2497,21 +2508,20 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2497 int flush_color, int work_color) 2508 int flush_color, int work_color)
2498{ 2509{
2499 bool wait = false; 2510 bool wait = false;
2500 unsigned int cpu; 2511 struct pool_workqueue *pwq;
2501 2512
2502 if (flush_color >= 0) { 2513 if (flush_color >= 0) {
2503 BUG_ON(atomic_read(&wq->nr_pwqs_to_flush)); 2514 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2504 atomic_set(&wq->nr_pwqs_to_flush, 1); 2515 atomic_set(&wq->nr_pwqs_to_flush, 1);
2505 } 2516 }
2506 2517
2507 for_each_pwq_cpu(cpu, wq) { 2518 for_each_pwq(pwq, wq) {
2508 struct pool_workqueue *pwq = get_pwq(cpu, wq);
2509 struct worker_pool *pool = pwq->pool; 2519 struct worker_pool *pool = pwq->pool;
2510 2520
2511 spin_lock_irq(&pool->lock); 2521 spin_lock_irq(&pool->lock);
2512 2522
2513 if (flush_color >= 0) { 2523 if (flush_color >= 0) {
2514 BUG_ON(pwq->flush_color != -1); 2524 WARN_ON_ONCE(pwq->flush_color != -1);
2515 2525
2516 if (pwq->nr_in_flight[flush_color]) { 2526 if (pwq->nr_in_flight[flush_color]) {
2517 pwq->flush_color = flush_color; 2527 pwq->flush_color = flush_color;
@@ -2521,7 +2531,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2521 } 2531 }
2522 2532
2523 if (work_color >= 0) { 2533 if (work_color >= 0) {
2524 BUG_ON(work_color != work_next_color(pwq->work_color)); 2534 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2525 pwq->work_color = work_color; 2535 pwq->work_color = work_color;
2526 } 2536 }
2527 2537
@@ -2538,11 +2548,8 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2538 * flush_workqueue - ensure that any scheduled work has run to completion. 2548 * flush_workqueue - ensure that any scheduled work has run to completion.
2539 * @wq: workqueue to flush 2549 * @wq: workqueue to flush
2540 * 2550 *
2541 * Forces execution of the workqueue and blocks until its completion. 2551 * This function sleeps until all work items which were queued on entry
2542 * This is typically used in driver shutdown handlers. 2552 * have finished execution, but it is not livelocked by new incoming ones.
2543 *
2544 * We sleep until all works which were queued on entry have been handled,
2545 * but we are not livelocked by new incoming ones.
2546 */ 2553 */
2547void flush_workqueue(struct workqueue_struct *wq) 2554void flush_workqueue(struct workqueue_struct *wq)
2548{ 2555{
@@ -2556,7 +2563,7 @@ void flush_workqueue(struct workqueue_struct *wq)
2556 lock_map_acquire(&wq->lockdep_map); 2563 lock_map_acquire(&wq->lockdep_map);
2557 lock_map_release(&wq->lockdep_map); 2564 lock_map_release(&wq->lockdep_map);
2558 2565
2559 mutex_lock(&wq->flush_mutex); 2566 mutex_lock(&wq->mutex);
2560 2567
2561 /* 2568 /*
2562 * Start-to-wait phase 2569 * Start-to-wait phase
@@ -2569,13 +2576,13 @@ void flush_workqueue(struct workqueue_struct *wq)
2569 * becomes our flush_color and work_color is advanced 2576 * becomes our flush_color and work_color is advanced
2570 * by one. 2577 * by one.
2571 */ 2578 */
2572 BUG_ON(!list_empty(&wq->flusher_overflow)); 2579 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2573 this_flusher.flush_color = wq->work_color; 2580 this_flusher.flush_color = wq->work_color;
2574 wq->work_color = next_color; 2581 wq->work_color = next_color;
2575 2582
2576 if (!wq->first_flusher) { 2583 if (!wq->first_flusher) {
2577 /* no flush in progress, become the first flusher */ 2584 /* no flush in progress, become the first flusher */
2578 BUG_ON(wq->flush_color != this_flusher.flush_color); 2585 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2579 2586
2580 wq->first_flusher = &this_flusher; 2587 wq->first_flusher = &this_flusher;
2581 2588
@@ -2588,7 +2595,7 @@ void flush_workqueue(struct workqueue_struct *wq)
2588 } 2595 }
2589 } else { 2596 } else {
2590 /* wait in queue */ 2597 /* wait in queue */
2591 BUG_ON(wq->flush_color == this_flusher.flush_color); 2598 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2592 list_add_tail(&this_flusher.list, &wq->flusher_queue); 2599 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2593 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 2600 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2594 } 2601 }
@@ -2601,7 +2608,7 @@ void flush_workqueue(struct workqueue_struct *wq)
2601 list_add_tail(&this_flusher.list, &wq->flusher_overflow); 2608 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2602 } 2609 }
2603 2610
2604 mutex_unlock(&wq->flush_mutex); 2611 mutex_unlock(&wq->mutex);
2605 2612
2606 wait_for_completion(&this_flusher.done); 2613 wait_for_completion(&this_flusher.done);
2607 2614
@@ -2614,7 +2621,7 @@ void flush_workqueue(struct workqueue_struct *wq)
2614 if (wq->first_flusher != &this_flusher) 2621 if (wq->first_flusher != &this_flusher)
2615 return; 2622 return;
2616 2623
2617 mutex_lock(&wq->flush_mutex); 2624 mutex_lock(&wq->mutex);
2618 2625
2619 /* we might have raced, check again with mutex held */ 2626 /* we might have raced, check again with mutex held */
2620 if (wq->first_flusher != &this_flusher) 2627 if (wq->first_flusher != &this_flusher)
@@ -2622,8 +2629,8 @@ void flush_workqueue(struct workqueue_struct *wq)
2622 2629
2623 wq->first_flusher = NULL; 2630 wq->first_flusher = NULL;
2624 2631
2625 BUG_ON(!list_empty(&this_flusher.list)); 2632 WARN_ON_ONCE(!list_empty(&this_flusher.list));
2626 BUG_ON(wq->flush_color != this_flusher.flush_color); 2633 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2627 2634
2628 while (true) { 2635 while (true) {
2629 struct wq_flusher *next, *tmp; 2636 struct wq_flusher *next, *tmp;
@@ -2636,8 +2643,8 @@ void flush_workqueue(struct workqueue_struct *wq)
2636 complete(&next->done); 2643 complete(&next->done);
2637 } 2644 }
2638 2645
2639 BUG_ON(!list_empty(&wq->flusher_overflow) && 2646 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2640 wq->flush_color != work_next_color(wq->work_color)); 2647 wq->flush_color != work_next_color(wq->work_color));
2641 2648
2642 /* this flush_color is finished, advance by one */ 2649 /* this flush_color is finished, advance by one */
2643 wq->flush_color = work_next_color(wq->flush_color); 2650 wq->flush_color = work_next_color(wq->flush_color);
@@ -2661,7 +2668,7 @@ void flush_workqueue(struct workqueue_struct *wq)
2661 } 2668 }
2662 2669
2663 if (list_empty(&wq->flusher_queue)) { 2670 if (list_empty(&wq->flusher_queue)) {
2664 BUG_ON(wq->flush_color != wq->work_color); 2671 WARN_ON_ONCE(wq->flush_color != wq->work_color);
2665 break; 2672 break;
2666 } 2673 }
2667 2674
@@ -2669,8 +2676,8 @@ void flush_workqueue(struct workqueue_struct *wq)
2669 * Need to flush more colors. Make the next flusher 2676 * Need to flush more colors. Make the next flusher
2670 * the new first flusher and arm pwqs. 2677 * the new first flusher and arm pwqs.
2671 */ 2678 */
2672 BUG_ON(wq->flush_color == wq->work_color); 2679 WARN_ON_ONCE(wq->flush_color == wq->work_color);
2673 BUG_ON(wq->flush_color != next->flush_color); 2680 WARN_ON_ONCE(wq->flush_color != next->flush_color);
2674 2681
2675 list_del_init(&next->list); 2682 list_del_init(&next->list);
2676 wq->first_flusher = next; 2683 wq->first_flusher = next;
@@ -2686,7 +2693,7 @@ void flush_workqueue(struct workqueue_struct *wq)
2686 } 2693 }
2687 2694
2688out_unlock: 2695out_unlock:
2689 mutex_unlock(&wq->flush_mutex); 2696 mutex_unlock(&wq->mutex);
2690} 2697}
2691EXPORT_SYMBOL_GPL(flush_workqueue); 2698EXPORT_SYMBOL_GPL(flush_workqueue);
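A minimal usage sketch of the guarantee documented above (my_work_fn and example_flush are hypothetical names): anything queued before flush_workqueue() is observed complete when the call returns, while later queueing cannot livelock the flusher.

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* do something */
}
static DECLARE_WORK(my_work, my_work_fn);

static void example_flush(struct workqueue_struct *wq)
{
	queue_work(wq, &my_work);
	flush_workqueue(wq);	/* my_work_fn() has finished by here */
}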
2692 2699
@@ -2704,22 +2711,23 @@ EXPORT_SYMBOL_GPL(flush_workqueue);
2704void drain_workqueue(struct workqueue_struct *wq) 2711void drain_workqueue(struct workqueue_struct *wq)
2705{ 2712{
2706 unsigned int flush_cnt = 0; 2713 unsigned int flush_cnt = 0;
2707 unsigned int cpu; 2714 struct pool_workqueue *pwq;
2708 2715
2709 /* 2716 /*
2710 * __queue_work() needs to test whether there are drainers, is much 2717 * __queue_work() needs to test whether there are drainers, is much
2711 * hotter than drain_workqueue() and already looks at @wq->flags. 2718 * hotter than drain_workqueue() and already looks at @wq->flags.
2712 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers. 2719 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
2713 */ 2720 */
2714 spin_lock(&workqueue_lock); 2721 mutex_lock(&wq->mutex);
2715 if (!wq->nr_drainers++) 2722 if (!wq->nr_drainers++)
2716 wq->flags |= WQ_DRAINING; 2723 wq->flags |= __WQ_DRAINING;
2717 spin_unlock(&workqueue_lock); 2724 mutex_unlock(&wq->mutex);
2718reflush: 2725reflush:
2719 flush_workqueue(wq); 2726 flush_workqueue(wq);
2720 2727
2721 for_each_pwq_cpu(cpu, wq) { 2728 mutex_lock(&wq->mutex);
2722 struct pool_workqueue *pwq = get_pwq(cpu, wq); 2729
2730 for_each_pwq(pwq, wq) {
2723 bool drained; 2731 bool drained;
2724 2732
2725 spin_lock_irq(&pwq->pool->lock); 2733 spin_lock_irq(&pwq->pool->lock);
@@ -2731,15 +2739,16 @@ reflush:
2731 2739
2732 if (++flush_cnt == 10 || 2740 if (++flush_cnt == 10 ||
2733 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 2741 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2734 pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n", 2742 pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
2735 wq->name, flush_cnt); 2743 wq->name, flush_cnt);
2744
2745 mutex_unlock(&wq->mutex);
2736 goto reflush; 2746 goto reflush;
2737 } 2747 }
2738 2748
2739 spin_lock(&workqueue_lock);
2740 if (!--wq->nr_drainers) 2749 if (!--wq->nr_drainers)
2741 wq->flags &= ~WQ_DRAINING; 2750 wq->flags &= ~__WQ_DRAINING;
2742 spin_unlock(&workqueue_lock); 2751 mutex_unlock(&wq->mutex);
2743} 2752}
2744EXPORT_SYMBOL_GPL(drain_workqueue); 2753EXPORT_SYMBOL_GPL(drain_workqueue);
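Typical teardown sketch (example_teardown is hypothetical): drain_workqueue() keeps flushing until self-requeueing chains settle, and __WQ_DRAINING turns queueing from anywhere else into a bug, after which destruction is safe. destroy_workqueue() drains internally anyway; the explicit call merely documents intent.

static void example_teardown(struct workqueue_struct *wq)
{
	drain_workqueue(wq);	/* self-requeueing work has settled */
	destroy_workqueue(wq);
}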
2745 2754
@@ -2750,11 +2759,15 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2750 struct pool_workqueue *pwq; 2759 struct pool_workqueue *pwq;
2751 2760
2752 might_sleep(); 2761 might_sleep();
2762
2763 local_irq_disable();
2753 pool = get_work_pool(work); 2764 pool = get_work_pool(work);
2754 if (!pool) 2765 if (!pool) {
2766 local_irq_enable();
2755 return false; 2767 return false;
2768 }
2756 2769
2757 spin_lock_irq(&pool->lock); 2770 spin_lock(&pool->lock);
2758 /* see the comment in try_to_grab_pending() with the same code */ 2771 /* see the comment in try_to_grab_pending() with the same code */
2759 pwq = get_work_pwq(work); 2772 pwq = get_work_pwq(work);
2760 if (pwq) { 2773 if (pwq) {
@@ -2776,7 +2789,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2776 * flusher is not running on the same workqueue by verifying write 2789 * flusher is not running on the same workqueue by verifying write
2777 * access. 2790 * access.
2778 */ 2791 */
2779 if (pwq->wq->saved_max_active == 1 || pwq->wq->flags & WQ_RESCUER) 2792 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
2780 lock_map_acquire(&pwq->wq->lockdep_map); 2793 lock_map_acquire(&pwq->wq->lockdep_map);
2781 else 2794 else
2782 lock_map_acquire_read(&pwq->wq->lockdep_map); 2795 lock_map_acquire_read(&pwq->wq->lockdep_map);
@@ -2933,66 +2946,6 @@ bool cancel_delayed_work_sync(struct delayed_work *dwork)
2933EXPORT_SYMBOL(cancel_delayed_work_sync); 2946EXPORT_SYMBOL(cancel_delayed_work_sync);
2934 2947
2935/** 2948/**
2936 * schedule_work_on - put work task on a specific cpu
2937 * @cpu: cpu to put the work task on
2938 * @work: job to be done
2939 *
2940 * This puts a job on a specific cpu
2941 */
2942bool schedule_work_on(int cpu, struct work_struct *work)
2943{
2944 return queue_work_on(cpu, system_wq, work);
2945}
2946EXPORT_SYMBOL(schedule_work_on);
2947
2948/**
2949 * schedule_work - put work task in global workqueue
2950 * @work: job to be done
2951 *
2952 * Returns %false if @work was already on the kernel-global workqueue and
2953 * %true otherwise.
2954 *
2955 * This puts a job in the kernel-global workqueue if it was not already
2956 * queued and leaves it in the same position on the kernel-global
2957 * workqueue otherwise.
2958 */
2959bool schedule_work(struct work_struct *work)
2960{
2961 return queue_work(system_wq, work);
2962}
2963EXPORT_SYMBOL(schedule_work);
2964
2965/**
2966 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
2967 * @cpu: cpu to use
2968 * @dwork: job to be done
2969 * @delay: number of jiffies to wait
2970 *
2971 * After waiting for a given time this puts a job in the kernel-global
2972 * workqueue on the specified CPU.
2973 */
2974bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
2975 unsigned long delay)
2976{
2977 return queue_delayed_work_on(cpu, system_wq, dwork, delay);
2978}
2979EXPORT_SYMBOL(schedule_delayed_work_on);
2980
2981/**
2982 * schedule_delayed_work - put work task in global workqueue after delay
2983 * @dwork: job to be done
2984 * @delay: number of jiffies to wait or 0 for immediate execution
2985 *
2986 * After waiting for a given time this puts a job in the kernel-global
2987 * workqueue.
2988 */
2989bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
2990{
2991 return queue_delayed_work(system_wq, dwork, delay);
2992}
2993EXPORT_SYMBOL(schedule_delayed_work);
2994
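The four wrappers deleted above are trivial shims around system_wq; in this series they survive as static inlines in include/linux/workqueue.h. Equivalent form, sketched under that assumption:

static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}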
2995/**
2996 * schedule_on_each_cpu - execute a function synchronously on each online CPU 2949 * schedule_on_each_cpu - execute a function synchronously on each online CPU
2997 * @func: the function to call 2950 * @func: the function to call
2998 * 2951 *
@@ -3085,51 +3038,1025 @@ int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3085} 3038}
3086EXPORT_SYMBOL_GPL(execute_in_process_context); 3039EXPORT_SYMBOL_GPL(execute_in_process_context);
3087 3040
3088int keventd_up(void) 3041#ifdef CONFIG_SYSFS
3042/*
 3043 * Workqueues with the WQ_SYSFS flag set are visible to userland via
3044 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
3045 * following attributes.
3046 *
3047 * per_cpu RO bool : whether the workqueue is per-cpu or unbound
3048 * max_active RW int : maximum number of in-flight work items
3049 *
3050 * Unbound workqueues have the following extra attributes.
3051 *
 3052 * pool_ids RO int : the associated pool IDs, one node:id pair per node
3053 * nice RW int : nice value of the workers
3054 * cpumask RW mask : bitmask of allowed CPUs for the workers
3055 */
3056struct wq_device {
3057 struct workqueue_struct *wq;
3058 struct device dev;
3059};
3060
3061static struct workqueue_struct *dev_to_wq(struct device *dev)
3062{
3063 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
3064
3065 return wq_dev->wq;
3066}
3067
3068static ssize_t wq_per_cpu_show(struct device *dev,
3069 struct device_attribute *attr, char *buf)
3070{
3071 struct workqueue_struct *wq = dev_to_wq(dev);
3072
3073 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
3074}
3075
3076static ssize_t wq_max_active_show(struct device *dev,
3077 struct device_attribute *attr, char *buf)
3089{ 3078{
3090 return system_wq != NULL; 3079 struct workqueue_struct *wq = dev_to_wq(dev);
3080
3081 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
3091} 3082}
3092 3083
3093static int alloc_pwqs(struct workqueue_struct *wq) 3084static ssize_t wq_max_active_store(struct device *dev,
3085 struct device_attribute *attr,
3086 const char *buf, size_t count)
3094{ 3087{
3088 struct workqueue_struct *wq = dev_to_wq(dev);
3089 int val;
3090
3091 if (sscanf(buf, "%d", &val) != 1 || val <= 0)
3092 return -EINVAL;
3093
3094 workqueue_set_max_active(wq, val);
3095 return count;
3096}
3097
3098static struct device_attribute wq_sysfs_attrs[] = {
3099 __ATTR(per_cpu, 0444, wq_per_cpu_show, NULL),
3100 __ATTR(max_active, 0644, wq_max_active_show, wq_max_active_store),
3101 __ATTR_NULL,
3102};
3103
3104static ssize_t wq_pool_ids_show(struct device *dev,
3105 struct device_attribute *attr, char *buf)
3106{
3107 struct workqueue_struct *wq = dev_to_wq(dev);
3108 const char *delim = "";
3109 int node, written = 0;
3110
3111 rcu_read_lock_sched();
3112 for_each_node(node) {
3113 written += scnprintf(buf + written, PAGE_SIZE - written,
3114 "%s%d:%d", delim, node,
3115 unbound_pwq_by_node(wq, node)->pool->id);
3116 delim = " ";
3117 }
3118 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
3119 rcu_read_unlock_sched();
3120
3121 return written;
3122}
3123
3124static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
3125 char *buf)
3126{
3127 struct workqueue_struct *wq = dev_to_wq(dev);
3128 int written;
3129
3130 mutex_lock(&wq->mutex);
3131 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
3132 mutex_unlock(&wq->mutex);
3133
3134 return written;
3135}
3136
3137/* prepare workqueue_attrs for sysfs store operations */
3138static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
3139{
3140 struct workqueue_attrs *attrs;
3141
3142 attrs = alloc_workqueue_attrs(GFP_KERNEL);
3143 if (!attrs)
3144 return NULL;
3145
3146 mutex_lock(&wq->mutex);
3147 copy_workqueue_attrs(attrs, wq->unbound_attrs);
3148 mutex_unlock(&wq->mutex);
3149 return attrs;
3150}
3151
3152static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
3153 const char *buf, size_t count)
3154{
3155 struct workqueue_struct *wq = dev_to_wq(dev);
3156 struct workqueue_attrs *attrs;
3157 int ret;
3158
3159 attrs = wq_sysfs_prep_attrs(wq);
3160 if (!attrs)
3161 return -ENOMEM;
3162
3163 if (sscanf(buf, "%d", &attrs->nice) == 1 &&
3164 attrs->nice >= -20 && attrs->nice <= 19)
3165 ret = apply_workqueue_attrs(wq, attrs);
3166 else
3167 ret = -EINVAL;
3168
3169 free_workqueue_attrs(attrs);
3170 return ret ?: count;
3171}
3172
3173static ssize_t wq_cpumask_show(struct device *dev,
3174 struct device_attribute *attr, char *buf)
3175{
3176 struct workqueue_struct *wq = dev_to_wq(dev);
3177 int written;
3178
3179 mutex_lock(&wq->mutex);
3180 written = cpumask_scnprintf(buf, PAGE_SIZE, wq->unbound_attrs->cpumask);
3181 mutex_unlock(&wq->mutex);
3182
3183 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
3184 return written;
3185}
3186
3187static ssize_t wq_cpumask_store(struct device *dev,
3188 struct device_attribute *attr,
3189 const char *buf, size_t count)
3190{
3191 struct workqueue_struct *wq = dev_to_wq(dev);
3192 struct workqueue_attrs *attrs;
3193 int ret;
3194
3195 attrs = wq_sysfs_prep_attrs(wq);
3196 if (!attrs)
3197 return -ENOMEM;
3198
3199 ret = cpumask_parse(buf, attrs->cpumask);
3200 if (!ret)
3201 ret = apply_workqueue_attrs(wq, attrs);
3202
3203 free_workqueue_attrs(attrs);
3204 return ret ?: count;
3205}
3206
3207static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
3208 char *buf)
3209{
3210 struct workqueue_struct *wq = dev_to_wq(dev);
3211 int written;
3212
3213 mutex_lock(&wq->mutex);
3214 written = scnprintf(buf, PAGE_SIZE, "%d\n",
3215 !wq->unbound_attrs->no_numa);
3216 mutex_unlock(&wq->mutex);
3217
3218 return written;
3219}
3220
3221static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
3222 const char *buf, size_t count)
3223{
3224 struct workqueue_struct *wq = dev_to_wq(dev);
3225 struct workqueue_attrs *attrs;
3226 int v, ret;
3227
3228 attrs = wq_sysfs_prep_attrs(wq);
3229 if (!attrs)
3230 return -ENOMEM;
3231
3232 ret = -EINVAL;
3233 if (sscanf(buf, "%d", &v) == 1) {
3234 attrs->no_numa = !v;
3235 ret = apply_workqueue_attrs(wq, attrs);
3236 }
3237
3238 free_workqueue_attrs(attrs);
3239 return ret ?: count;
3240}
3241
3242static struct device_attribute wq_sysfs_unbound_attrs[] = {
3243 __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
3244 __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
3245 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
3246 __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
3247 __ATTR_NULL,
3248};
3249
3250static struct bus_type wq_subsys = {
3251 .name = "workqueue",
3252 .dev_attrs = wq_sysfs_attrs,
3253};
3254
3255static int __init wq_sysfs_init(void)
3256{
3257 return subsys_virtual_register(&wq_subsys, NULL);
3258}
3259core_initcall(wq_sysfs_init);
3260
3261static void wq_device_release(struct device *dev)
3262{
3263 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
3264
3265 kfree(wq_dev);
3266}
3267
3268/**
3269 * workqueue_sysfs_register - make a workqueue visible in sysfs
3270 * @wq: the workqueue to register
3271 *
3272 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
3273 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
3274 * which is the preferred method.
3275 *
 3276 * A workqueue user should use this function directly iff it wants to apply
3277 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
3278 * apply_workqueue_attrs() may race against userland updating the
3279 * attributes.
3280 *
3281 * Returns 0 on success, -errno on failure.
3282 */
3283int workqueue_sysfs_register(struct workqueue_struct *wq)
3284{
3285 struct wq_device *wq_dev;
3286 int ret;
3287
3095 /* 3288 /*
 3096 * pwqs are forced aligned according to WORK_STRUCT_FLAG_BITS. 3289 * Adjusting max_active or creating new pwqs by applying
 3097 * Make sure that the alignment isn't lower than that of 3290 * attributes breaks the ordering guarantee. Disallow exposing ordered
3098 * unsigned long long. 3291 * workqueues.
3099 */ 3292 */
3100 const size_t size = sizeof(struct pool_workqueue); 3293 if (WARN_ON(wq->flags & __WQ_ORDERED))
3101 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, 3294 return -EINVAL;
3102 __alignof__(unsigned long long));
3103 3295
3104 if (!(wq->flags & WQ_UNBOUND)) 3296 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
3105 wq->pool_wq.pcpu = __alloc_percpu(size, align); 3297 if (!wq_dev)
3106 else { 3298 return -ENOMEM;
3107 void *ptr; 3299
3300 wq_dev->wq = wq;
3301 wq_dev->dev.bus = &wq_subsys;
3302 wq_dev->dev.init_name = wq->name;
3303 wq_dev->dev.release = wq_device_release;
3304
3305 /*
3306 * unbound_attrs are created separately. Suppress uevent until
3307 * everything is ready.
3308 */
3309 dev_set_uevent_suppress(&wq_dev->dev, true);
3310
3311 ret = device_register(&wq_dev->dev);
3312 if (ret) {
3313 kfree(wq_dev);
3314 wq->wq_dev = NULL;
3315 return ret;
3316 }
3317
3318 if (wq->flags & WQ_UNBOUND) {
3319 struct device_attribute *attr;
3320
3321 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
3322 ret = device_create_file(&wq_dev->dev, attr);
3323 if (ret) {
3324 device_unregister(&wq_dev->dev);
3325 wq->wq_dev = NULL;
3326 return ret;
3327 }
3328 }
3329 }
3330
3331 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
3332 return 0;
3333}
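Usage sketch (names hypothetical): pass WQ_SYSFS at allocation time and registration happens automatically, exposing /sys/bus/workqueue/devices/example_wq/ with the per_cpu and max_active attributes, plus the unbound-only attributes when WQ_UNBOUND is also set.

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example_wq",
				     WQ_UNBOUND | WQ_SYSFS, 0);
	return example_wq ? 0 : -ENOMEM;
}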
3334
3335/**
3336 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
3337 * @wq: the workqueue to unregister
3338 *
3339 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
3340 */
3341static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
3342{
3343 struct wq_device *wq_dev = wq->wq_dev;
3344
3345 if (!wq->wq_dev)
3346 return;
3347
3348 wq->wq_dev = NULL;
3349 device_unregister(&wq_dev->dev);
3350}
3351#else /* CONFIG_SYSFS */
3352static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
3353#endif /* CONFIG_SYSFS */
3354
3355/**
3356 * free_workqueue_attrs - free a workqueue_attrs
3357 * @attrs: workqueue_attrs to free
3358 *
3359 * Undo alloc_workqueue_attrs().
3360 */
3361void free_workqueue_attrs(struct workqueue_attrs *attrs)
3362{
3363 if (attrs) {
3364 free_cpumask_var(attrs->cpumask);
3365 kfree(attrs);
3366 }
3367}
3368
3369/**
3370 * alloc_workqueue_attrs - allocate a workqueue_attrs
3371 * @gfp_mask: allocation mask to use
3372 *
3373 * Allocate a new workqueue_attrs, initialize with default settings and
3374 * return it. Returns NULL on failure.
3375 */
3376struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
3377{
3378 struct workqueue_attrs *attrs;
3379
3380 attrs = kzalloc(sizeof(*attrs), gfp_mask);
3381 if (!attrs)
3382 goto fail;
3383 if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
3384 goto fail;
3385
3386 cpumask_copy(attrs->cpumask, cpu_possible_mask);
3387 return attrs;
3388fail:
3389 free_workqueue_attrs(attrs);
3390 return NULL;
3391}
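Life-cycle sketch for the two helpers above (example_set_nice is hypothetical): the attrs object is caller-owned, and apply_workqueue_attrs(), defined later in this patch, copies what it needs, so the caller frees its own copy unconditionally.

static int example_set_nice(struct workqueue_struct *unbound_wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -5;	/* workers of @unbound_wq run at nice -5 */
	ret = apply_workqueue_attrs(unbound_wq, attrs);
	free_workqueue_attrs(attrs);
	return ret;
}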
3392
3393static void copy_workqueue_attrs(struct workqueue_attrs *to,
3394 const struct workqueue_attrs *from)
3395{
3396 to->nice = from->nice;
3397 cpumask_copy(to->cpumask, from->cpumask);
3398}
3399
3400/* hash value of the content of @attr */
3401static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3402{
3403 u32 hash = 0;
3404
3405 hash = jhash_1word(attrs->nice, hash);
3406 hash = jhash(cpumask_bits(attrs->cpumask),
3407 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3408 return hash;
3409}
3410
3411/* content equality test */
3412static bool wqattrs_equal(const struct workqueue_attrs *a,
3413 const struct workqueue_attrs *b)
3414{
3415 if (a->nice != b->nice)
3416 return false;
3417 if (!cpumask_equal(a->cpumask, b->cpumask))
3418 return false;
3419 return true;
3420}
3421
3422/**
3423 * init_worker_pool - initialize a newly zalloc'd worker_pool
3424 * @pool: worker_pool to initialize
3425 *
 3426 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
3427 * Returns 0 on success, -errno on failure. Even on failure, all fields
3428 * inside @pool proper are initialized and put_unbound_pool() can be called
3429 * on @pool safely to release it.
3430 */
3431static int init_worker_pool(struct worker_pool *pool)
3432{
3433 spin_lock_init(&pool->lock);
3434 pool->id = -1;
3435 pool->cpu = -1;
3436 pool->node = NUMA_NO_NODE;
3437 pool->flags |= POOL_DISASSOCIATED;
3438 INIT_LIST_HEAD(&pool->worklist);
3439 INIT_LIST_HEAD(&pool->idle_list);
3440 hash_init(pool->busy_hash);
3441
3442 init_timer_deferrable(&pool->idle_timer);
3443 pool->idle_timer.function = idle_worker_timeout;
3444 pool->idle_timer.data = (unsigned long)pool;
3445
3446 setup_timer(&pool->mayday_timer, pool_mayday_timeout,
3447 (unsigned long)pool);
3448
3449 mutex_init(&pool->manager_arb);
3450 mutex_init(&pool->manager_mutex);
3451 idr_init(&pool->worker_idr);
3452
3453 INIT_HLIST_NODE(&pool->hash_node);
3454 pool->refcnt = 1;
3455
3456 /* shouldn't fail above this point */
3457 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
3458 if (!pool->attrs)
3459 return -ENOMEM;
3460 return 0;
3461}
3462
3463static void rcu_free_pool(struct rcu_head *rcu)
3464{
3465 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3466
3467 idr_destroy(&pool->worker_idr);
3468 free_workqueue_attrs(pool->attrs);
3469 kfree(pool);
3470}
3471
3472/**
3473 * put_unbound_pool - put a worker_pool
3474 * @pool: worker_pool to put
3475 *
 3476 * Put @pool. If its refcnt reaches zero, it gets destroyed in a sched-RCU
3477 * safe manner. get_unbound_pool() calls this function on its failure path
3478 * and this function should be able to release pools which went through,
3479 * successfully or not, init_worker_pool().
3480 *
3481 * Should be called with wq_pool_mutex held.
3482 */
3483static void put_unbound_pool(struct worker_pool *pool)
3484{
3485 struct worker *worker;
3486
3487 lockdep_assert_held(&wq_pool_mutex);
3488
3489 if (--pool->refcnt)
3490 return;
3491
3492 /* sanity checks */
3493 if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
3494 WARN_ON(!list_empty(&pool->worklist)))
3495 return;
3496
3497 /* release id and unhash */
3498 if (pool->id >= 0)
3499 idr_remove(&worker_pool_idr, pool->id);
3500 hash_del(&pool->hash_node);
3501
3502 /*
3503 * Become the manager and destroy all workers. Grabbing
3504 * manager_arb prevents @pool's workers from blocking on
3505 * manager_mutex.
3506 */
3507 mutex_lock(&pool->manager_arb);
3508 mutex_lock(&pool->manager_mutex);
3509 spin_lock_irq(&pool->lock);
3510
3511 while ((worker = first_worker(pool)))
3512 destroy_worker(worker);
3513 WARN_ON(pool->nr_workers || pool->nr_idle);
3514
3515 spin_unlock_irq(&pool->lock);
3516 mutex_unlock(&pool->manager_mutex);
3517 mutex_unlock(&pool->manager_arb);
3518
3519 /* shut down the timers */
3520 del_timer_sync(&pool->idle_timer);
3521 del_timer_sync(&pool->mayday_timer);
3522
3523 /* sched-RCU protected to allow dereferences from get_work_pool() */
3524 call_rcu_sched(&pool->rcu, rcu_free_pool);
3525}
3526
3527/**
3528 * get_unbound_pool - get a worker_pool with the specified attributes
3529 * @attrs: the attributes of the worker_pool to get
3530 *
3531 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3532 * reference count and return it. If there already is a matching
3533 * worker_pool, it will be used; otherwise, this function attempts to
3534 * create a new one. On failure, returns NULL.
3535 *
3536 * Should be called with wq_pool_mutex held.
3537 */
3538static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3539{
3540 u32 hash = wqattrs_hash(attrs);
3541 struct worker_pool *pool;
3542 int node;
3543
3544 lockdep_assert_held(&wq_pool_mutex);
3545
3546 /* do we already have a matching pool? */
3547 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3548 if (wqattrs_equal(pool->attrs, attrs)) {
3549 pool->refcnt++;
3550 goto out_unlock;
3551 }
3552 }
3553
3554 /* nope, create a new one */
3555 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
3556 if (!pool || init_worker_pool(pool) < 0)
3557 goto fail;
3558
3559 if (workqueue_freezing)
3560 pool->flags |= POOL_FREEZING;
3561
3562 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
3563 copy_workqueue_attrs(pool->attrs, attrs);
3564
3565 /* if cpumask is contained inside a NUMA node, we belong to that node */
3566 if (wq_numa_enabled) {
3567 for_each_node(node) {
3568 if (cpumask_subset(pool->attrs->cpumask,
3569 wq_numa_possible_cpumask[node])) {
3570 pool->node = node;
3571 break;
3572 }
3573 }
3574 }
3575
3576 if (worker_pool_assign_id(pool) < 0)
3577 goto fail;
3578
3579 /* create and start the initial worker */
3580 if (create_and_start_worker(pool) < 0)
3581 goto fail;
3582
3583 /* install */
3584 hash_add(unbound_pool_hash, &pool->hash_node, hash);
3585out_unlock:
3586 return pool;
3587fail:
3588 if (pool)
3589 put_unbound_pool(pool);
3590 return NULL;
3591}
3592
3593static void rcu_free_pwq(struct rcu_head *rcu)
3594{
3595 kmem_cache_free(pwq_cache,
3596 container_of(rcu, struct pool_workqueue, rcu));
3597}
3598
3599/*
3600 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3601 * and needs to be destroyed.
3602 */
3603static void pwq_unbound_release_workfn(struct work_struct *work)
3604{
3605 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3606 unbound_release_work);
3607 struct workqueue_struct *wq = pwq->wq;
3608 struct worker_pool *pool = pwq->pool;
3609 bool is_last;
3610
3611 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3612 return;
3613
3614 /*
3615 * Unlink @pwq. Synchronization against wq->mutex isn't strictly
3616 * necessary on release but do it anyway. It's easier to verify
3617 * and consistent with the linking path.
3618 */
3619 mutex_lock(&wq->mutex);
3620 list_del_rcu(&pwq->pwqs_node);
3621 is_last = list_empty(&wq->pwqs);
3622 mutex_unlock(&wq->mutex);
3623
3624 mutex_lock(&wq_pool_mutex);
3625 put_unbound_pool(pool);
3626 mutex_unlock(&wq_pool_mutex);
3627
3628 call_rcu_sched(&pwq->rcu, rcu_free_pwq);
3629
3630 /*
3631 * If we're the last pwq going away, @wq is already dead and no one
3632 * is gonna access it anymore. Free it.
3633 */
3634 if (is_last) {
3635 free_workqueue_attrs(wq->unbound_attrs);
3636 kfree(wq);
3637 }
3638}
3639
3640/**
3641 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3642 * @pwq: target pool_workqueue
3643 *
3644 * If @pwq isn't freezing, set @pwq->max_active to the associated
3645 * workqueue's saved_max_active and activate delayed work items
3646 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
3647 */
3648static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3649{
3650 struct workqueue_struct *wq = pwq->wq;
3651 bool freezable = wq->flags & WQ_FREEZABLE;
3652
3653 /* for @wq->saved_max_active */
3654 lockdep_assert_held(&wq->mutex);
3655
3656 /* fast exit for non-freezable wqs */
3657 if (!freezable && pwq->max_active == wq->saved_max_active)
3658 return;
3659
3660 spin_lock_irq(&pwq->pool->lock);
3661
3662 if (!freezable || !(pwq->pool->flags & POOL_FREEZING)) {
3663 pwq->max_active = wq->saved_max_active;
3664
3665 while (!list_empty(&pwq->delayed_works) &&
3666 pwq->nr_active < pwq->max_active)
3667 pwq_activate_first_delayed(pwq);
3108 3668
3109 /* 3669 /*
3110 * Allocate enough room to align pwq and put an extra 3670 * Need to kick a worker after thawed or an unbound wq's
3111 * pointer at the end pointing back to the originally 3671 * max_active is bumped. It's a slow path. Do it always.
3112 * allocated pointer which will be used for free.
3113 */ 3672 */
3114 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); 3673 wake_up_worker(pwq->pool);
3115 if (ptr) { 3674 } else {
3116 wq->pool_wq.single = PTR_ALIGN(ptr, align); 3675 pwq->max_active = 0;
3117 *(void **)(wq->pool_wq.single + 1) = ptr; 3676 }
3677
3678 spin_unlock_irq(&pwq->pool->lock);
3679}
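The runtime knob that feeds this function is workqueue_set_max_active(); a one-line sketch (example_widen is hypothetical):

static void example_widen(struct workqueue_struct *wq)
{
	/* Propagated to each pwq by pwq_adjust_max_active(); delayed
	 * work items are activated as slots open up. While the pool is
	 * freezing, max_active stays clamped to 0 until thaw. */
	workqueue_set_max_active(wq, 16);
}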
3680
3681/* initialize newly alloced @pwq which is associated with @wq and @pool */
3682static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3683 struct worker_pool *pool)
3684{
3685 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3686
3687 memset(pwq, 0, sizeof(*pwq));
3688
3689 pwq->pool = pool;
3690 pwq->wq = wq;
3691 pwq->flush_color = -1;
3692 pwq->refcnt = 1;
3693 INIT_LIST_HEAD(&pwq->delayed_works);
3694 INIT_LIST_HEAD(&pwq->pwqs_node);
3695 INIT_LIST_HEAD(&pwq->mayday_node);
3696 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3697}
3698
3699/* sync @pwq with the current state of its associated wq and link it */
3700static void link_pwq(struct pool_workqueue *pwq)
3701{
3702 struct workqueue_struct *wq = pwq->wq;
3703
3704 lockdep_assert_held(&wq->mutex);
3705
3706 /* may be called multiple times, ignore if already linked */
3707 if (!list_empty(&pwq->pwqs_node))
3708 return;
3709
3710 /*
3711 * Set the matching work_color. This is synchronized with
3712 * wq->mutex to avoid confusing flush_workqueue().
3713 */
3714 pwq->work_color = wq->work_color;
3715
3716 /* sync max_active to the current setting */
3717 pwq_adjust_max_active(pwq);
3718
3719 /* link in @pwq */
3720 list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3721}
3722
3723/* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3724static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3725 const struct workqueue_attrs *attrs)
3726{
3727 struct worker_pool *pool;
3728 struct pool_workqueue *pwq;
3729
3730 lockdep_assert_held(&wq_pool_mutex);
3731
3732 pool = get_unbound_pool(attrs);
3733 if (!pool)
3734 return NULL;
3735
3736 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3737 if (!pwq) {
3738 put_unbound_pool(pool);
3739 return NULL;
3740 }
3741
3742 init_pwq(pwq, wq, pool);
3743 return pwq;
3744}
3745
3746/* undo alloc_unbound_pwq(), used only in the error path */
3747static void free_unbound_pwq(struct pool_workqueue *pwq)
3748{
3749 lockdep_assert_held(&wq_pool_mutex);
3750
3751 if (pwq) {
3752 put_unbound_pool(pwq->pool);
3753 kfree(pwq);
3754 }
3755}
3756
3757/**
3758 * wq_calc_node_mask - calculate a wq_attrs' cpumask for the specified node
3759 * @attrs: the wq_attrs of interest
3760 * @node: the target NUMA node
3761 * @cpu_going_down: if >= 0, the CPU to consider as offline
3762 * @cpumask: outarg, the resulting cpumask
3763 *
3764 * Calculate the cpumask a workqueue with @attrs should use on @node. If
3765 * @cpu_going_down is >= 0, that cpu is considered offline during
3766 * calculation. The result is stored in @cpumask. This function returns
3767 * %true if the resulting @cpumask is different from @attrs->cpumask,
3768 * %false if equal.
3769 *
3770 * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
3771 * enabled and @node has online CPUs requested by @attrs, the returned
3772 * cpumask is the intersection of the possible CPUs of @node and
3773 * @attrs->cpumask.
3774 *
3775 * The caller is responsible for ensuring that the cpumask of @node stays
3776 * stable.
3777 */
3778static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3779 int cpu_going_down, cpumask_t *cpumask)
3780{
3781 if (!wq_numa_enabled || attrs->no_numa)
3782 goto use_dfl;
3783
3784 /* does @node have any online CPUs @attrs wants? */
3785 cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3786 if (cpu_going_down >= 0)
3787 cpumask_clear_cpu(cpu_going_down, cpumask);
3788
3789 if (cpumask_empty(cpumask))
3790 goto use_dfl;
3791
3792 /* yeap, return possible CPUs in @node that @attrs wants */
3793 cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3794 return !cpumask_equal(cpumask, attrs->cpumask);
3795
3796use_dfl:
3797 cpumask_copy(cpumask, attrs->cpumask);
3798 return false;
3799}
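A userspace model of the computation above, with plain bitmasks standing in for cpumask_t and the online/possible distinction collapsed (all names hypothetical); it shows the fallback to the workqueue-wide mask when a node ends up with no usable CPU:

#include <stdbool.h>
#include <stdio.h>

/* bit i == CPU i; returns true iff the node gets its own mask */
static bool calc_node_mask(unsigned int node_cpus, unsigned int attrs_cpus,
			   int cpu_going_down, unsigned int *out)
{
	unsigned int m = node_cpus & attrs_cpus;

	if (cpu_going_down >= 0)
		m &= ~(1u << cpu_going_down);
	if (!m) {			/* node unusable: fall back */
		*out = attrs_cpus;
		return false;
	}
	*out = m;
	return m != attrs_cpus;
}

int main(void)
{
	unsigned int out;

	/* node {0,1}, attrs {1,2,3}, CPU 1 going down -> fallback */
	printf("%d 0x%x\n", calc_node_mask(0x3, 0xe, 1, &out), out);
	return 0;
}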
3800
3801/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3802static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3803 int node,
3804 struct pool_workqueue *pwq)
3805{
3806 struct pool_workqueue *old_pwq;
3807
3808 lockdep_assert_held(&wq->mutex);
3809
3810 /* link_pwq() can handle duplicate calls */
3811 link_pwq(pwq);
3812
3813 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3814 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3815 return old_pwq;
3816}
3817
3818/**
3819 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
3820 * @wq: the target workqueue
3821 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
3822 *
3823 * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
3824 * machines, this function maps a separate pwq to each NUMA node with
 3825 * possible CPUs in @attrs->cpumask so that work items are affine to the
3826 * NUMA node it was issued on. Older pwqs are released as in-flight work
3827 * items finish. Note that a work item which repeatedly requeues itself
3828 * back-to-back will stay on its current pwq.
3829 *
3830 * Performs GFP_KERNEL allocations. Returns 0 on success and -errno on
3831 * failure.
3832 */
3833int apply_workqueue_attrs(struct workqueue_struct *wq,
3834 const struct workqueue_attrs *attrs)
3835{
3836 struct workqueue_attrs *new_attrs, *tmp_attrs;
3837 struct pool_workqueue **pwq_tbl, *dfl_pwq;
3838 int node, ret;
3839
3840 /* only unbound workqueues can change attributes */
3841 if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
3842 return -EINVAL;
3843
3844 /* creating multiple pwqs breaks ordering guarantee */
3845 if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
3846 return -EINVAL;
3847
3848 pwq_tbl = kzalloc(wq_numa_tbl_len * sizeof(pwq_tbl[0]), GFP_KERNEL);
3849 new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3850 tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3851 if (!pwq_tbl || !new_attrs || !tmp_attrs)
3852 goto enomem;
3853
3854 /* make a copy of @attrs and sanitize it */
3855 copy_workqueue_attrs(new_attrs, attrs);
3856 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3857
3858 /*
3859 * We may create multiple pwqs with differing cpumasks. Make a
3860 * copy of @new_attrs which will be modified and used to obtain
3861 * pools.
3862 */
3863 copy_workqueue_attrs(tmp_attrs, new_attrs);
3864
3865 /*
3866 * CPUs should stay stable across pwq creations and installations.
3867 * Pin CPUs, determine the target cpumask for each node and create
3868 * pwqs accordingly.
3869 */
3870 get_online_cpus();
3871
3872 mutex_lock(&wq_pool_mutex);
3873
3874 /*
3875 * If something goes wrong during CPU up/down, we'll fall back to
3876 * the default pwq covering whole @attrs->cpumask. Always create
3877 * it even if we don't use it immediately.
3878 */
3879 dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3880 if (!dfl_pwq)
3881 goto enomem_pwq;
3882
3883 for_each_node(node) {
3884 if (wq_calc_node_cpumask(attrs, node, -1, tmp_attrs->cpumask)) {
3885 pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3886 if (!pwq_tbl[node])
3887 goto enomem_pwq;
3888 } else {
3889 dfl_pwq->refcnt++;
3890 pwq_tbl[node] = dfl_pwq;
3118 } 3891 }
3119 } 3892 }
3120 3893
3121 /* just in case, make sure it's actually aligned */ 3894 mutex_unlock(&wq_pool_mutex);
3122 BUG_ON(!IS_ALIGNED(wq->pool_wq.v, align)); 3895
3123 return wq->pool_wq.v ? 0 : -ENOMEM; 3896 /* all pwqs have been created successfully, let's install'em */
3897 mutex_lock(&wq->mutex);
3898
3899 copy_workqueue_attrs(wq->unbound_attrs, new_attrs);
3900
3901 /* save the previous pwq and install the new one */
3902 for_each_node(node)
3903 pwq_tbl[node] = numa_pwq_tbl_install(wq, node, pwq_tbl[node]);
3904
3905 /* @dfl_pwq might not have been used, ensure it's linked */
3906 link_pwq(dfl_pwq);
3907 swap(wq->dfl_pwq, dfl_pwq);
3908
3909 mutex_unlock(&wq->mutex);
3910
3911 /* put the old pwqs */
3912 for_each_node(node)
3913 put_pwq_unlocked(pwq_tbl[node]);
3914 put_pwq_unlocked(dfl_pwq);
3915
3916 put_online_cpus();
3917 ret = 0;
3918 /* fall through */
3919out_free:
3920 free_workqueue_attrs(tmp_attrs);
3921 free_workqueue_attrs(new_attrs);
3922 kfree(pwq_tbl);
3923 return ret;
3924
3925enomem_pwq:
3926 free_unbound_pwq(dfl_pwq);
3927 for_each_node(node)
3928 if (pwq_tbl && pwq_tbl[node] != dfl_pwq)
3929 free_unbound_pwq(pwq_tbl[node]);
3930 mutex_unlock(&wq_pool_mutex);
3931 put_online_cpus();
3932enomem:
3933 ret = -ENOMEM;
3934 goto out_free;
3124} 3935}
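Putting it together, a caller-side sketch (example_pin is hypothetical): restrict an unbound workqueue to CPUs 0-3. On NUMA machines the function above then creates one pwq per node whose possible CPUs intersect the mask, plus the default pwq covering the whole mask.

static int example_pin(struct workqueue_struct *unbound_wq)
{
	struct workqueue_attrs *attrs;
	int cpu, ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	cpumask_clear(attrs->cpumask);
	for (cpu = 0; cpu <= 3; cpu++)
		cpumask_set_cpu(cpu, attrs->cpumask);

	ret = apply_workqueue_attrs(unbound_wq, attrs);
	free_workqueue_attrs(attrs);
	return ret;
}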
3125 3936
3126static void free_pwqs(struct workqueue_struct *wq) 3937/**
3938 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
3939 * @wq: the target workqueue
3940 * @cpu: the CPU coming up or going down
3941 * @online: whether @cpu is coming up or going down
3942 *
3943 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
3944 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of
3945 * @wq accordingly.
3946 *
3947 * If NUMA affinity can't be adjusted due to memory allocation failure, it
3948 * falls back to @wq->dfl_pwq which may not be optimal but is always
3949 * correct.
3950 *
3951 * Note that when the last allowed CPU of a NUMA node goes offline for a
3952 * workqueue with a cpumask spanning multiple nodes, the workers which were
3953 * already executing the work items for the workqueue will lose their CPU
3954 * affinity and may execute on any CPU. This is similar to how per-cpu
3955 * workqueues behave on CPU_DOWN. If a workqueue user wants strict
3956 * affinity, it's the user's responsibility to flush the work item from
3957 * CPU_DOWN_PREPARE.
3958 */
3959static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
3960 bool online)
3127{ 3961{
3128 if (!(wq->flags & WQ_UNBOUND)) 3962 int node = cpu_to_node(cpu);
3129 free_percpu(wq->pool_wq.pcpu); 3963 int cpu_off = online ? -1 : cpu;
3130 else if (wq->pool_wq.single) { 3964 struct pool_workqueue *old_pwq = NULL, *pwq;
3131 /* the pointer to free is stored right after the pwq */ 3965 struct workqueue_attrs *target_attrs;
3132 kfree(*(void **)(wq->pool_wq.single + 1)); 3966 cpumask_t *cpumask;
3967
3968 lockdep_assert_held(&wq_pool_mutex);
3969
3970 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND))
3971 return;
3972
3973 /*
3974 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
3975 * Let's use a preallocated one. The following buf is protected by
3976 * CPU hotplug exclusion.
3977 */
3978 target_attrs = wq_update_unbound_numa_attrs_buf;
3979 cpumask = target_attrs->cpumask;
3980
3981 mutex_lock(&wq->mutex);
3982 if (wq->unbound_attrs->no_numa)
3983 goto out_unlock;
3984
3985 copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
3986 pwq = unbound_pwq_by_node(wq, node);
3987
3988 /*
3989 * Let's determine what needs to be done. If the target cpumask is
3990 * different from wq's, we need to compare it to @pwq's and create
3991 * a new one if they don't match. If the target cpumask equals
3992 * wq's, the default pwq should be used. If @pwq is already the
3993 * default one, nothing to do; otherwise, install the default one.
3994 */
3995 if (wq_calc_node_cpumask(wq->unbound_attrs, node, cpu_off, cpumask)) {
3996 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
3997 goto out_unlock;
3998 } else {
3999 if (pwq == wq->dfl_pwq)
4000 goto out_unlock;
4001 else
4002 goto use_dfl_pwq;
4003 }
4004
4005 mutex_unlock(&wq->mutex);
4006
4007 /* create a new pwq */
4008 pwq = alloc_unbound_pwq(wq, target_attrs);
4009 if (!pwq) {
4010 pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
4011 wq->name);
4012 goto out_unlock;
4013 }
4014
4015 /*
4016 * Install the new pwq. As this function is called only from CPU
4017 * hotplug callbacks and applying a new attrs is wrapped with
4018 * get/put_online_cpus(), @wq->unbound_attrs couldn't have changed
 4019 * in between.
4020 */
4021 mutex_lock(&wq->mutex);
4022 old_pwq = numa_pwq_tbl_install(wq, node, pwq);
4023 goto out_unlock;
4024
4025use_dfl_pwq:
4026 spin_lock_irq(&wq->dfl_pwq->pool->lock);
4027 get_pwq(wq->dfl_pwq);
4028 spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4029 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
4030out_unlock:
4031 mutex_unlock(&wq->mutex);
4032 put_pwq_unlocked(old_pwq);
4033}
4034
4035static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4036{
4037 bool highpri = wq->flags & WQ_HIGHPRI;
4038 int cpu;
4039
4040 if (!(wq->flags & WQ_UNBOUND)) {
4041 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
4042 if (!wq->cpu_pwqs)
4043 return -ENOMEM;
4044
4045 for_each_possible_cpu(cpu) {
4046 struct pool_workqueue *pwq =
4047 per_cpu_ptr(wq->cpu_pwqs, cpu);
4048 struct worker_pool *cpu_pools =
4049 per_cpu(cpu_worker_pools, cpu);
4050
4051 init_pwq(pwq, wq, &cpu_pools[highpri]);
4052
4053 mutex_lock(&wq->mutex);
4054 link_pwq(pwq);
4055 mutex_unlock(&wq->mutex);
4056 }
4057 return 0;
4058 } else {
4059 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
3133 } 4060 }
3134} 4061}
3135 4062
@@ -3151,30 +4078,28 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3151 struct lock_class_key *key, 4078 struct lock_class_key *key,
3152 const char *lock_name, ...) 4079 const char *lock_name, ...)
3153{ 4080{
3154 va_list args, args1; 4081 size_t tbl_size = 0;
4082 va_list args;
3155 struct workqueue_struct *wq; 4083 struct workqueue_struct *wq;
3156 unsigned int cpu; 4084 struct pool_workqueue *pwq;
3157 size_t namelen;
3158 4085
3159 /* determine namelen, allocate wq and format name */ 4086 /* allocate wq and format name */
3160 va_start(args, lock_name); 4087 if (flags & WQ_UNBOUND)
3161 va_copy(args1, args); 4088 tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]);
3162 namelen = vsnprintf(NULL, 0, fmt, args) + 1;
3163 4089
3164 wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL); 4090 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
3165 if (!wq) 4091 if (!wq)
3166 goto err; 4092 return NULL;
3167 4093
3168 vsnprintf(wq->name, namelen, fmt, args1); 4094 if (flags & WQ_UNBOUND) {
3169 va_end(args); 4095 wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3170 va_end(args1); 4096 if (!wq->unbound_attrs)
4097 goto err_free_wq;
4098 }
3171 4099
3172 /* 4100 va_start(args, lock_name);
3173 * Workqueues which may be used during memory reclaim should 4101 vsnprintf(wq->name, sizeof(wq->name), fmt, args);
3174 * have a rescuer to guarantee forward progress. 4102 va_end(args);
3175 */
3176 if (flags & WQ_MEM_RECLAIM)
3177 flags |= WQ_RESCUER;
3178 4103
3179 max_active = max_active ?: WQ_DFL_ACTIVE; 4104 max_active = max_active ?: WQ_DFL_ACTIVE;
3180 max_active = wq_clamp_max_active(max_active, flags, wq->name); 4105 max_active = wq_clamp_max_active(max_active, flags, wq->name);
@@ -3182,71 +4107,70 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3182 /* init wq */ 4107 /* init wq */
3183 wq->flags = flags; 4108 wq->flags = flags;
3184 wq->saved_max_active = max_active; 4109 wq->saved_max_active = max_active;
3185 mutex_init(&wq->flush_mutex); 4110 mutex_init(&wq->mutex);
3186 atomic_set(&wq->nr_pwqs_to_flush, 0); 4111 atomic_set(&wq->nr_pwqs_to_flush, 0);
4112 INIT_LIST_HEAD(&wq->pwqs);
3187 INIT_LIST_HEAD(&wq->flusher_queue); 4113 INIT_LIST_HEAD(&wq->flusher_queue);
3188 INIT_LIST_HEAD(&wq->flusher_overflow); 4114 INIT_LIST_HEAD(&wq->flusher_overflow);
4115 INIT_LIST_HEAD(&wq->maydays);
3189 4116
3190 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 4117 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
3191 INIT_LIST_HEAD(&wq->list); 4118 INIT_LIST_HEAD(&wq->list);
3192 4119
3193 if (alloc_pwqs(wq) < 0) 4120 if (alloc_and_link_pwqs(wq) < 0)
3194 goto err; 4121 goto err_free_wq;
3195
3196 for_each_pwq_cpu(cpu, wq) {
3197 struct pool_workqueue *pwq = get_pwq(cpu, wq);
3198 4122
3199 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 4123 /*
3200 pwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI); 4124 * Workqueues which may be used during memory reclaim should
3201 pwq->wq = wq; 4125 * have a rescuer to guarantee forward progress.
3202 pwq->flush_color = -1; 4126 */
3203 pwq->max_active = max_active; 4127 if (flags & WQ_MEM_RECLAIM) {
3204 INIT_LIST_HEAD(&pwq->delayed_works);
3205 }
3206
3207 if (flags & WQ_RESCUER) {
3208 struct worker *rescuer; 4128 struct worker *rescuer;
3209 4129
3210 if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL)) 4130 rescuer = alloc_worker();
3211 goto err;
3212
3213 wq->rescuer = rescuer = alloc_worker();
3214 if (!rescuer) 4131 if (!rescuer)
3215 goto err; 4132 goto err_destroy;
3216 4133
3217 rescuer->rescue_wq = wq; 4134 rescuer->rescue_wq = wq;
3218 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", 4135 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
3219 wq->name); 4136 wq->name);
3220 if (IS_ERR(rescuer->task)) 4137 if (IS_ERR(rescuer->task)) {
3221 goto err; 4138 kfree(rescuer);
4139 goto err_destroy;
4140 }
3222 4141
3223 rescuer->task->flags |= PF_THREAD_BOUND; 4142 wq->rescuer = rescuer;
4143 rescuer->task->flags |= PF_NO_SETAFFINITY;
3224 wake_up_process(rescuer->task); 4144 wake_up_process(rescuer->task);
3225 } 4145 }
3226 4146
4147 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4148 goto err_destroy;
4149
3227 /* 4150 /*
3228 * workqueue_lock protects global freeze state and workqueues 4151 * wq_pool_mutex protects global freeze state and workqueues list.
3229 * list. Grab it, set max_active accordingly and add the new 4152 * Grab it, adjust max_active and add the new @wq to workqueues
3230 * workqueue to workqueues list. 4153 * list.
3231 */ 4154 */
3232 spin_lock(&workqueue_lock); 4155 mutex_lock(&wq_pool_mutex);
3233 4156
3234 if (workqueue_freezing && wq->flags & WQ_FREEZABLE) 4157 mutex_lock(&wq->mutex);
3235 for_each_pwq_cpu(cpu, wq) 4158 for_each_pwq(pwq, wq)
3236 get_pwq(cpu, wq)->max_active = 0; 4159 pwq_adjust_max_active(pwq);
4160 mutex_unlock(&wq->mutex);
3237 4161
3238 list_add(&wq->list, &workqueues); 4162 list_add(&wq->list, &workqueues);
3239 4163
3240 spin_unlock(&workqueue_lock); 4164 mutex_unlock(&wq_pool_mutex);
3241 4165
3242 return wq; 4166 return wq;
3243err: 4167
3244 if (wq) { 4168err_free_wq:
3245 free_pwqs(wq); 4169 free_workqueue_attrs(wq->unbound_attrs);
3246 free_mayday_mask(wq->mayday_mask); 4170 kfree(wq);
3247 kfree(wq->rescuer); 4171 return NULL;
3248 kfree(wq); 4172err_destroy:
3249 } 4173 destroy_workqueue(wq);
3250 return NULL; 4174 return NULL;
3251} 4175}
3252EXPORT_SYMBOL_GPL(__alloc_workqueue_key); 4176EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
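__alloc_workqueue_key() is normally reached through the alloc_workqueue() macro. A minimal caller's-eye sketch of the allocation/teardown pair; my_wq and the init/exit functions are hypothetical, and passing WQ_MEM_RECLAIM exercises the rescuer setup shown in the hunk above:

    #include <linux/workqueue.h>
    #include <linux/init.h>
    #include <linux/errno.h>

    static struct workqueue_struct *my_wq;

    static int __init my_init(void)
    {
            /* max_active of 0 selects the default, WQ_DFL_ACTIVE */
            my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
            if (!my_wq)
                    return -ENOMEM;
            return 0;
    }

    static void __exit my_exit(void)
    {
            destroy_workqueue(my_wq);   /* drains pending work first */
    }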
@@ -3259,60 +4183,78 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
3259 */ 4183 */
3260void destroy_workqueue(struct workqueue_struct *wq) 4184void destroy_workqueue(struct workqueue_struct *wq)
3261{ 4185{
3262 unsigned int cpu; 4186 struct pool_workqueue *pwq;
4187 int node;
3263 4188
3264 /* drain it before proceeding with destruction */ 4189 /* drain it before proceeding with destruction */
3265 drain_workqueue(wq); 4190 drain_workqueue(wq);
3266 4191
4192 /* sanity checks */
4193 mutex_lock(&wq->mutex);
4194 for_each_pwq(pwq, wq) {
4195 int i;
4196
4197 for (i = 0; i < WORK_NR_COLORS; i++) {
4198 if (WARN_ON(pwq->nr_in_flight[i])) {
4199 mutex_unlock(&wq->mutex);
4200 return;
4201 }
4202 }
4203
4204 if (WARN_ON(pwq->refcnt > 1) ||
4205 WARN_ON(pwq->nr_active) ||
4206 WARN_ON(!list_empty(&pwq->delayed_works))) {
4207 mutex_unlock(&wq->mutex);
4208 return;
4209 }
4210 }
4211 mutex_unlock(&wq->mutex);
4212
3267 /* 4213 /*
3268 * wq list is used to freeze wq, remove from list after 4214 * wq list is used to freeze wq, remove from list after
3269 * flushing is complete in case freeze races us. 4215 * flushing is complete in case freeze races us.
3270 */ 4216 */
3271 spin_lock(&workqueue_lock); 4217 mutex_lock(&wq_pool_mutex);
3272 list_del(&wq->list); 4218 list_del_init(&wq->list);
3273 spin_unlock(&workqueue_lock); 4219 mutex_unlock(&wq_pool_mutex);
3274 4220
3275 /* sanity check */ 4221 workqueue_sysfs_unregister(wq);
3276 for_each_pwq_cpu(cpu, wq) {
3277 struct pool_workqueue *pwq = get_pwq(cpu, wq);
3278 int i;
3279 4222
3280 for (i = 0; i < WORK_NR_COLORS; i++) 4223 if (wq->rescuer) {
3281 BUG_ON(pwq->nr_in_flight[i]);
3282 BUG_ON(pwq->nr_active);
3283 BUG_ON(!list_empty(&pwq->delayed_works));
3284 }
3285
3286 if (wq->flags & WQ_RESCUER) {
3287 kthread_stop(wq->rescuer->task); 4224 kthread_stop(wq->rescuer->task);
3288 free_mayday_mask(wq->mayday_mask);
3289 kfree(wq->rescuer); 4225 kfree(wq->rescuer);
4226 wq->rescuer = NULL;
3290 } 4227 }
3291 4228
3292 free_pwqs(wq); 4229 if (!(wq->flags & WQ_UNBOUND)) {
3293 kfree(wq); 4230 /*
3294} 4231 * The base ref is never dropped on per-cpu pwqs. Directly
3295EXPORT_SYMBOL_GPL(destroy_workqueue); 4232 * free the pwqs and wq.
3296 4233 */
3297/** 4234 free_percpu(wq->cpu_pwqs);
3298 * pwq_set_max_active - adjust max_active of a pwq 4235 kfree(wq);
3299 * @pwq: target pool_workqueue 4236 } else {
3300 * @max_active: new max_active value. 4237 /*
3301 * 4238 * We're the sole accessor of @wq at this point. Directly
3302 * Set @pwq->max_active to @max_active and activate delayed works if 4239 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
3303 * increased. 4240 * @wq will be freed when the last pwq is released.
3304 * 4241 */
3305 * CONTEXT: 4242 for_each_node(node) {
3306 * spin_lock_irq(pool->lock). 4243 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3307 */ 4244 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
3308static void pwq_set_max_active(struct pool_workqueue *pwq, int max_active) 4245 put_pwq_unlocked(pwq);
3309{ 4246 }
3310 pwq->max_active = max_active;
3311 4247
3312 while (!list_empty(&pwq->delayed_works) && 4248 /*
3313 pwq->nr_active < pwq->max_active) 4249 * Put dfl_pwq. @wq may be freed any time after dfl_pwq is
3314 pwq_activate_first_delayed(pwq); 4250 * put. Don't access it afterwards.
4251 */
4252 pwq = wq->dfl_pwq;
4253 wq->dfl_pwq = NULL;
4254 put_pwq_unlocked(pwq);
4255 }
3315} 4256}
4257EXPORT_SYMBOL_GPL(destroy_workqueue);
3316 4258
3317/** 4259/**
3318 * workqueue_set_max_active - adjust max_active of a workqueue 4260 * workqueue_set_max_active - adjust max_active of a workqueue
@@ -3326,30 +4268,37 @@ static void pwq_set_max_active(struct pool_workqueue *pwq, int max_active)
3326 */ 4268 */
3327void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 4269void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3328{ 4270{
3329 unsigned int cpu; 4271 struct pool_workqueue *pwq;
4272
4273 /* disallow meddling with max_active for ordered workqueues */
4274 if (WARN_ON(wq->flags & __WQ_ORDERED))
4275 return;
3330 4276
3331 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 4277 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
3332 4278
3333 spin_lock(&workqueue_lock); 4279 mutex_lock(&wq->mutex);
3334 4280
3335 wq->saved_max_active = max_active; 4281 wq->saved_max_active = max_active;
3336 4282
3337 for_each_pwq_cpu(cpu, wq) { 4283 for_each_pwq(pwq, wq)
3338 struct pool_workqueue *pwq = get_pwq(cpu, wq); 4284 pwq_adjust_max_active(pwq);
3339 struct worker_pool *pool = pwq->pool;
3340
3341 spin_lock_irq(&pool->lock);
3342 4285
3343 if (!(wq->flags & WQ_FREEZABLE) || 4286 mutex_unlock(&wq->mutex);
3344 !(pool->flags & POOL_FREEZING)) 4287}
3345 pwq_set_max_active(pwq, max_active); 4288EXPORT_SYMBOL_GPL(workqueue_set_max_active);
3346 4289
3347 spin_unlock_irq(&pool->lock); 4290/**
3348 } 4291 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4292 *
4293 * Determine whether %current is a workqueue rescuer. Can be used from
4294 * work functions to determine whether it's being run off the rescuer task.
4295 */
4296bool current_is_workqueue_rescuer(void)
4297{
4298 struct worker *worker = current_wq_worker();
3349 4299
3350 spin_unlock(&workqueue_lock); 4300 return worker && worker->rescue_wq;
3351} 4301}
3352EXPORT_SYMBOL_GPL(workqueue_set_max_active);
3353 4302
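current_is_workqueue_rescuer(), added just above, lets a work function detect that it is being run off the rescuer, i.e. under memory pressure. A hedged usage sketch (my_work_fn is hypothetical):

    #include <linux/workqueue.h>
    #include <linux/printk.h>

    static void my_work_fn(struct work_struct *work)
    {
            /* true only while the rescuer thread executes this item */
            if (current_is_workqueue_rescuer())
                    pr_info("my_work_fn: running from the rescuer\n");
    }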
3354/** 4303/**
3355 * workqueue_congested - test whether a workqueue is congested 4304 * workqueue_congested - test whether a workqueue is congested
@@ -3363,11 +4312,22 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
3363 * RETURNS: 4312 * RETURNS:
3364 * %true if congested, %false otherwise. 4313 * %true if congested, %false otherwise.
3365 */ 4314 */
3366bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq) 4315bool workqueue_congested(int cpu, struct workqueue_struct *wq)
3367{ 4316{
3368 struct pool_workqueue *pwq = get_pwq(cpu, wq); 4317 struct pool_workqueue *pwq;
4318 bool ret;
4319
4320 rcu_read_lock_sched();
4321
4322 if (!(wq->flags & WQ_UNBOUND))
4323 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4324 else
4325 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4326
4327 ret = !list_empty(&pwq->delayed_works);
4328 rcu_read_unlock_sched();
3369 4329
3370 return !list_empty(&pwq->delayed_works); 4330 return ret;
3371} 4331}
3372EXPORT_SYMBOL_GPL(workqueue_congested); 4332EXPORT_SYMBOL_GPL(workqueue_congested);
3373 4333
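workqueue_congested() now resolves the pwq under rcu_read_lock_sched() and reports whether it has delayed (not yet activated) work. A small sketch of how a producer might use it to back off; should_throttle() is a hypothetical helper:

    #include <linux/workqueue.h>
    #include <linux/smp.h>

    static bool should_throttle(struct workqueue_struct *wq)
    {
            /* advisory only: congestion can change right after we look */
            return workqueue_congested(raw_smp_processor_id(), wq);
    }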
@@ -3384,19 +4344,22 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
3384 */ 4344 */
3385unsigned int work_busy(struct work_struct *work) 4345unsigned int work_busy(struct work_struct *work)
3386{ 4346{
3387 struct worker_pool *pool = get_work_pool(work); 4347 struct worker_pool *pool;
3388 unsigned long flags; 4348 unsigned long flags;
3389 unsigned int ret = 0; 4349 unsigned int ret = 0;
3390 4350
3391 if (work_pending(work)) 4351 if (work_pending(work))
3392 ret |= WORK_BUSY_PENDING; 4352 ret |= WORK_BUSY_PENDING;
3393 4353
4354 local_irq_save(flags);
4355 pool = get_work_pool(work);
3394 if (pool) { 4356 if (pool) {
3395 spin_lock_irqsave(&pool->lock, flags); 4357 spin_lock(&pool->lock);
3396 if (find_worker_executing_work(pool, work)) 4358 if (find_worker_executing_work(pool, work))
3397 ret |= WORK_BUSY_RUNNING; 4359 ret |= WORK_BUSY_RUNNING;
3398 spin_unlock_irqrestore(&pool->lock, flags); 4360 spin_unlock(&pool->lock);
3399 } 4361 }
4362 local_irq_restore(flags);
3400 4363
3401 return ret; 4364 return ret;
3402} 4365}
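The reworked work_busy() above takes the pool lock with IRQs already disabled via local_irq_save(); from a caller's perspective the result is only a snapshot. A sketch, with work_is_running() as a hypothetical wrapper:

    #include <linux/workqueue.h>

    static bool work_is_running(struct work_struct *work)
    {
            /* advisory: the state may change as soon as this returns */
            return work_busy(work) & WORK_BUSY_RUNNING;
    }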
@@ -3422,53 +4385,153 @@ static void wq_unbind_fn(struct work_struct *work)
3422 int cpu = smp_processor_id(); 4385 int cpu = smp_processor_id();
3423 struct worker_pool *pool; 4386 struct worker_pool *pool;
3424 struct worker *worker; 4387 struct worker *worker;
3425 int i; 4388 int wi;
3426 4389
3427 for_each_std_worker_pool(pool, cpu) { 4390 for_each_cpu_worker_pool(pool, cpu) {
3428 BUG_ON(cpu != smp_processor_id()); 4391 WARN_ON_ONCE(cpu != smp_processor_id());
3429 4392
3430 mutex_lock(&pool->assoc_mutex); 4393 mutex_lock(&pool->manager_mutex);
3431 spin_lock_irq(&pool->lock); 4394 spin_lock_irq(&pool->lock);
3432 4395
3433 /* 4396 /*
3434 * We've claimed all manager positions. Make all workers 4397 * We've blocked all manager operations. Make all workers
3435 * unbound and set DISASSOCIATED. Before this, all workers 4398 * unbound and set DISASSOCIATED. Before this, all workers
3436 * except for the ones which are still executing works from 4399 * except for the ones which are still executing works from
3437 * before the last CPU down must be on the cpu. After 4400 * before the last CPU down must be on the cpu. After
3438 * this, they may become diasporas. 4401 * this, they may become diasporas.
3439 */ 4402 */
3440 list_for_each_entry(worker, &pool->idle_list, entry) 4403 for_each_pool_worker(worker, wi, pool)
3441 worker->flags |= WORKER_UNBOUND;
3442
3443 for_each_busy_worker(worker, i, pool)
3444 worker->flags |= WORKER_UNBOUND; 4404 worker->flags |= WORKER_UNBOUND;
3445 4405
3446 pool->flags |= POOL_DISASSOCIATED; 4406 pool->flags |= POOL_DISASSOCIATED;
3447 4407
3448 spin_unlock_irq(&pool->lock); 4408 spin_unlock_irq(&pool->lock);
3449 mutex_unlock(&pool->assoc_mutex); 4409 mutex_unlock(&pool->manager_mutex);
4410
4411 /*
4412 * Call schedule() so that we cross rq->lock and thus can
4413 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4414 * This is necessary as scheduler callbacks may be invoked
4415 * from other cpus.
4416 */
4417 schedule();
4418
4419 /*
4420 * Sched callbacks are disabled now. Zap nr_running.
4421 * After this, nr_running stays zero and need_more_worker()
4422 * and keep_working() are always true as long as the
4423 * worklist is not empty. This pool now behaves as an
4424 * unbound (in terms of concurrency management) pool which
4425 * is served by workers tied to the pool.
4426 */
4427 atomic_set(&pool->nr_running, 0);
4428
4429 /*
4430 * With concurrency management just turned off, a busy
4431 * worker blocking could lead to lengthy stalls. Kick off
4432 * unbound chain execution of currently pending work items.
4433 */
4434 spin_lock_irq(&pool->lock);
4435 wake_up_worker(pool);
4436 spin_unlock_irq(&pool->lock);
3450 } 4437 }
4438}
3451 4439
3452 /* 4440/**
3453 * Call schedule() so that we cross rq->lock and thus can guarantee 4441 * rebind_workers - rebind all workers of a pool to the associated CPU
3454 * sched callbacks see the %WORKER_UNBOUND flag. This is necessary 4442 * @pool: pool of interest
3455 * as scheduler callbacks may be invoked from other cpus. 4443 *
3456 */ 4444 * @pool->cpu is coming online. Rebind all workers to the CPU.
3457 schedule(); 4445 */
4446static void rebind_workers(struct worker_pool *pool)
4447{
4448 struct worker *worker;
4449 int wi;
4450
4451 lockdep_assert_held(&pool->manager_mutex);
3458 4452
3459 /* 4453 /*
3460 * Sched callbacks are disabled now. Zap nr_running. After this, 4454 * Restore CPU affinity of all workers. As all idle workers should
3461 * nr_running stays zero and need_more_worker() and keep_working() 4455 * be on the run-queue of the associated CPU before any local
3462 * are always true as long as the worklist is not empty. Pools on 4456 * wake-ups for concurrency management happen, restore CPU affinty
3463 * @cpu now behave as unbound (in terms of concurrency management) 4457 * of all workers first and then clear UNBOUND. As we're called
3464 * pools which are served by workers tied to the CPU. 4458 * from CPU_ONLINE, the following shouldn't fail.
3465 *
3466 * On return from this function, the current worker would trigger
3467 * unbound chain execution of pending work items if other workers
3468 * didn't already.
3469 */ 4459 */
3470 for_each_std_worker_pool(pool, cpu) 4460 for_each_pool_worker(worker, wi, pool)
3471 atomic_set(&pool->nr_running, 0); 4461 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4462 pool->attrs->cpumask) < 0);
4463
4464 spin_lock_irq(&pool->lock);
4465
4466 for_each_pool_worker(worker, wi, pool) {
4467 unsigned int worker_flags = worker->flags;
4468
4469 /*
4470 * A bound idle worker should actually be on the runqueue
4471 * of the associated CPU for local wake-ups targeting it to
4472 * work. Kick all idle workers so that they migrate to the
4473 * associated CPU. Doing this in the same loop as
4474 * replacing UNBOUND with REBOUND is safe as no worker will
4475 * be bound before @pool->lock is released.
4476 */
4477 if (worker_flags & WORKER_IDLE)
4478 wake_up_process(worker->task);
4479
4480 /*
4481 * We want to clear UNBOUND but can't directly call
4482 * worker_clr_flags() or adjust nr_running. Atomically
4483 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
4484 * @worker will clear REBOUND using worker_clr_flags() when
4485 * it initiates the next execution cycle thus restoring
4486 * concurrency management. Note that when or whether
4487 * @worker clears REBOUND doesn't affect correctness.
4488 *
4489 * ACCESS_ONCE() is necessary because @worker->flags may be
4490 * tested without holding any lock in
4491 * wq_worker_waking_up(). Without it, NOT_RUNNING test may
4492 * fail incorrectly leading to premature concurrency
4493 * management operations.
4494 */
4495 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4496 worker_flags |= WORKER_REBOUND;
4497 worker_flags &= ~WORKER_UNBOUND;
4498 ACCESS_ONCE(worker->flags) = worker_flags;
4499 }
4500
4501 spin_unlock_irq(&pool->lock);
4502}
4503
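The UNBOUND-to-REBOUND switch in rebind_workers() above relies on composing the new flag word privately and publishing it with a single ACCESS_ONCE() store, so lockless readers such as wq_worker_waking_up() never observe a half-updated mask. The same idiom in isolation; shared_flags and publish_flags are hypothetical:

    #include <linux/compiler.h>

    static unsigned int shared_flags;   /* read locklessly elsewhere */

    /* writer side: caller holds the write-side lock */
    static void publish_flags(unsigned int set, unsigned int clear)
    {
            unsigned int v = shared_flags;

            v |= set;
            v &= ~clear;
            ACCESS_ONCE(shared_flags) = v;  /* one store, no torn reads */
    }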
4504/**
4505 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
4506 * @pool: unbound pool of interest
4507 * @cpu: the CPU which is coming up
4508 *
4509 * An unbound pool may end up with a cpumask which doesn't have any online
4510 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
4511 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
4512 * online CPU before, cpus_allowed of all its workers should be restored.
4513 */
4514static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
4515{
4516 static cpumask_t cpumask;
4517 struct worker *worker;
4518 int wi;
4519
4520 lockdep_assert_held(&pool->manager_mutex);
4521
4522 /* is @cpu allowed for @pool? */
4523 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
4524 return;
4525
4526 /* is @cpu the only online CPU? */
4527 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
4528 if (cpumask_weight(&cpumask) != 1)
4529 return;
4530
4531 /* as we're called from CPU_ONLINE, the following shouldn't fail */
4532 for_each_pool_worker(worker, wi, pool)
4533 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4534 pool->attrs->cpumask) < 0);
3472} 4535}
3473 4536
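The "@cpu is the only online CPU in the mask" test above is plain cpumask arithmetic. The same check factored out as a sketch; sole_online_cpu() is hypothetical and, like the original, assumes the caller serializes use of the static scratch mask:

    #include <linux/cpumask.h>

    static bool sole_online_cpu(const struct cpumask *allowed, int cpu)
    {
            static cpumask_t tmp;   /* caller-serialized scratch space */

            if (!cpumask_test_cpu(cpu, allowed))
                    return false;
            cpumask_and(&tmp, allowed, cpu_online_mask);
            return cpumask_weight(&tmp) == 1;
    }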
3474/* 4537/*
@@ -3479,39 +4542,46 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
3479 unsigned long action, 4542 unsigned long action,
3480 void *hcpu) 4543 void *hcpu)
3481{ 4544{
3482 unsigned int cpu = (unsigned long)hcpu; 4545 int cpu = (unsigned long)hcpu;
3483 struct worker_pool *pool; 4546 struct worker_pool *pool;
4547 struct workqueue_struct *wq;
4548 int pi;
3484 4549
3485 switch (action & ~CPU_TASKS_FROZEN) { 4550 switch (action & ~CPU_TASKS_FROZEN) {
3486 case CPU_UP_PREPARE: 4551 case CPU_UP_PREPARE:
3487 for_each_std_worker_pool(pool, cpu) { 4552 for_each_cpu_worker_pool(pool, cpu) {
3488 struct worker *worker;
3489
3490 if (pool->nr_workers) 4553 if (pool->nr_workers)
3491 continue; 4554 continue;
3492 4555 if (create_and_start_worker(pool) < 0)
3493 worker = create_worker(pool);
3494 if (!worker)
3495 return NOTIFY_BAD; 4556 return NOTIFY_BAD;
3496
3497 spin_lock_irq(&pool->lock);
3498 start_worker(worker);
3499 spin_unlock_irq(&pool->lock);
3500 } 4557 }
3501 break; 4558 break;
3502 4559
3503 case CPU_DOWN_FAILED: 4560 case CPU_DOWN_FAILED:
3504 case CPU_ONLINE: 4561 case CPU_ONLINE:
3505 for_each_std_worker_pool(pool, cpu) { 4562 mutex_lock(&wq_pool_mutex);
3506 mutex_lock(&pool->assoc_mutex);
3507 spin_lock_irq(&pool->lock);
3508 4563
3509 pool->flags &= ~POOL_DISASSOCIATED; 4564 for_each_pool(pool, pi) {
3510 rebind_workers(pool); 4565 mutex_lock(&pool->manager_mutex);
4566
4567 if (pool->cpu == cpu) {
4568 spin_lock_irq(&pool->lock);
4569 pool->flags &= ~POOL_DISASSOCIATED;
4570 spin_unlock_irq(&pool->lock);
4571
4572 rebind_workers(pool);
4573 } else if (pool->cpu < 0) {
4574 restore_unbound_workers_cpumask(pool, cpu);
4575 }
3511 4576
3512 spin_unlock_irq(&pool->lock); 4577 mutex_unlock(&pool->manager_mutex);
3513 mutex_unlock(&pool->assoc_mutex);
3514 } 4578 }
4579
4580 /* update NUMA affinity of unbound workqueues */
4581 list_for_each_entry(wq, &workqueues, list)
4582 wq_update_unbound_numa(wq, cpu, true);
4583
4584 mutex_unlock(&wq_pool_mutex);
3515 break; 4585 break;
3516 } 4586 }
3517 return NOTIFY_OK; 4587 return NOTIFY_OK;
@@ -3525,14 +4595,23 @@ static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
3525 unsigned long action, 4595 unsigned long action,
3526 void *hcpu) 4596 void *hcpu)
3527{ 4597{
3528 unsigned int cpu = (unsigned long)hcpu; 4598 int cpu = (unsigned long)hcpu;
3529 struct work_struct unbind_work; 4599 struct work_struct unbind_work;
4600 struct workqueue_struct *wq;
3530 4601
3531 switch (action & ~CPU_TASKS_FROZEN) { 4602 switch (action & ~CPU_TASKS_FROZEN) {
3532 case CPU_DOWN_PREPARE: 4603 case CPU_DOWN_PREPARE:
3533 /* unbinding should happen on the local CPU */ 4604 /* unbinding per-cpu workers should happen on the local CPU */
3534 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); 4605 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
3535 queue_work_on(cpu, system_highpri_wq, &unbind_work); 4606 queue_work_on(cpu, system_highpri_wq, &unbind_work);
4607
4608 /* update NUMA affinity of unbound workqueues */
4609 mutex_lock(&wq_pool_mutex);
4610 list_for_each_entry(wq, &workqueues, list)
4611 wq_update_unbound_numa(wq, cpu, false);
4612 mutex_unlock(&wq_pool_mutex);
4613
4614 /* wait for per-cpu unbinding to finish */
3536 flush_work(&unbind_work); 4615 flush_work(&unbind_work);
3537 break; 4616 break;
3538 } 4617 }
@@ -3565,7 +4644,7 @@ static void work_for_cpu_fn(struct work_struct *work)
3565 * It is up to the caller to ensure that the cpu doesn't go offline. 4644 * It is up to the caller to ensure that the cpu doesn't go offline.
3566 * The caller must not hold any locks which would prevent @fn from completing. 4645 * The caller must not hold any locks which would prevent @fn from completing.
3567 */ 4646 */
3568long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) 4647long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
3569{ 4648{
3570 struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 4649 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
3571 4650
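work_on_cpu() runs @fn synchronously on the given CPU via a one-shot work item. A usage sketch under the new int-typed cpu signature; probe_fn and probe_cpu1 are hypothetical and the caller is assumed to keep CPU 1 online:

    #include <linux/workqueue.h>

    static long probe_fn(void *arg)
    {
            return 42;      /* executes with CPU 1 as the local CPU */
    }

    static long probe_cpu1(void)
    {
            return work_on_cpu(1, probe_fn, NULL);
    }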
@@ -3583,44 +4662,40 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
3583 * freeze_workqueues_begin - begin freezing workqueues 4662 * freeze_workqueues_begin - begin freezing workqueues
3584 * 4663 *
3585 * Start freezing workqueues. After this function returns, all freezable 4664 * Start freezing workqueues. After this function returns, all freezable
3586 * workqueues will queue new works to their frozen_works list instead of 4665 * workqueues will queue new works to their delayed_works list instead of
3587 * pool->worklist. 4666 * pool->worklist.
3588 * 4667 *
3589 * CONTEXT: 4668 * CONTEXT:
3590 * Grabs and releases workqueue_lock and pool->lock's. 4669 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
3591 */ 4670 */
3592void freeze_workqueues_begin(void) 4671void freeze_workqueues_begin(void)
3593{ 4672{
3594 unsigned int cpu; 4673 struct worker_pool *pool;
4674 struct workqueue_struct *wq;
4675 struct pool_workqueue *pwq;
4676 int pi;
3595 4677
3596 spin_lock(&workqueue_lock); 4678 mutex_lock(&wq_pool_mutex);
3597 4679
3598 BUG_ON(workqueue_freezing); 4680 WARN_ON_ONCE(workqueue_freezing);
3599 workqueue_freezing = true; 4681 workqueue_freezing = true;
3600 4682
3601 for_each_wq_cpu(cpu) { 4683 /* set FREEZING */
3602 struct worker_pool *pool; 4684 for_each_pool(pool, pi) {
3603 struct workqueue_struct *wq; 4685 spin_lock_irq(&pool->lock);
3604 4686 WARN_ON_ONCE(pool->flags & POOL_FREEZING);
3605 for_each_std_worker_pool(pool, cpu) { 4687 pool->flags |= POOL_FREEZING;
3606 spin_lock_irq(&pool->lock); 4688 spin_unlock_irq(&pool->lock);
3607 4689 }
3608 WARN_ON_ONCE(pool->flags & POOL_FREEZING);
3609 pool->flags |= POOL_FREEZING;
3610
3611 list_for_each_entry(wq, &workqueues, list) {
3612 struct pool_workqueue *pwq = get_pwq(cpu, wq);
3613
3614 if (pwq && pwq->pool == pool &&
3615 (wq->flags & WQ_FREEZABLE))
3616 pwq->max_active = 0;
3617 }
3618 4690
3619 spin_unlock_irq(&pool->lock); 4691 list_for_each_entry(wq, &workqueues, list) {
3620 } 4692 mutex_lock(&wq->mutex);
4693 for_each_pwq(pwq, wq)
4694 pwq_adjust_max_active(pwq);
4695 mutex_unlock(&wq->mutex);
3621 } 4696 }
3622 4697
3623 spin_unlock(&workqueue_lock); 4698 mutex_unlock(&wq_pool_mutex);
3624} 4699}
3625 4700
3626/** 4701/**
@@ -3630,7 +4705,7 @@ void freeze_workqueues_begin(void)
3630 * between freeze_workqueues_begin() and thaw_workqueues(). 4705 * between freeze_workqueues_begin() and thaw_workqueues().
3631 * 4706 *
3632 * CONTEXT: 4707 * CONTEXT:
3633 * Grabs and releases workqueue_lock. 4708 * Grabs and releases wq_pool_mutex.
3634 * 4709 *
3635 * RETURNS: 4710 * RETURNS:
3636 * %true if some freezable workqueues are still busy. %false if freezing 4711 * %true if some freezable workqueues are still busy. %false if freezing
@@ -3638,34 +4713,34 @@ void freeze_workqueues_begin(void)
3638 */ 4713 */
3639bool freeze_workqueues_busy(void) 4714bool freeze_workqueues_busy(void)
3640{ 4715{
3641 unsigned int cpu;
3642 bool busy = false; 4716 bool busy = false;
4717 struct workqueue_struct *wq;
4718 struct pool_workqueue *pwq;
3643 4719
3644 spin_lock(&workqueue_lock); 4720 mutex_lock(&wq_pool_mutex);
3645 4721
3646 BUG_ON(!workqueue_freezing); 4722 WARN_ON_ONCE(!workqueue_freezing);
3647 4723
3648 for_each_wq_cpu(cpu) { 4724 list_for_each_entry(wq, &workqueues, list) {
3649 struct workqueue_struct *wq; 4725 if (!(wq->flags & WQ_FREEZABLE))
4726 continue;
3650 /* 4727 /*
3651 * nr_active is monotonically decreasing. It's safe 4728 * nr_active is monotonically decreasing. It's safe
3652 * to peek without lock. 4729 * to peek without lock.
3653 */ 4730 */
3654 list_for_each_entry(wq, &workqueues, list) { 4731 rcu_read_lock_sched();
3655 struct pool_workqueue *pwq = get_pwq(cpu, wq); 4732 for_each_pwq(pwq, wq) {
3656 4733 WARN_ON_ONCE(pwq->nr_active < 0);
3657 if (!pwq || !(wq->flags & WQ_FREEZABLE))
3658 continue;
3659
3660 BUG_ON(pwq->nr_active < 0);
3661 if (pwq->nr_active) { 4734 if (pwq->nr_active) {
3662 busy = true; 4735 busy = true;
4736 rcu_read_unlock_sched();
3663 goto out_unlock; 4737 goto out_unlock;
3664 } 4738 }
3665 } 4739 }
4740 rcu_read_unlock_sched();
3666 } 4741 }
3667out_unlock: 4742out_unlock:
3668 spin_unlock(&workqueue_lock); 4743 mutex_unlock(&wq_pool_mutex);
3669 return busy; 4744 return busy;
3670} 4745}
3671 4746
@@ -3676,104 +4751,141 @@ out_unlock:
3676 * frozen works are transferred to their respective pool worklists. 4751 * frozen works are transferred to their respective pool worklists.
3677 * 4752 *
3678 * CONTEXT: 4753 * CONTEXT:
3679 * Grabs and releases workqueue_lock and pool->lock's. 4754 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
3680 */ 4755 */
3681void thaw_workqueues(void) 4756void thaw_workqueues(void)
3682{ 4757{
3683 unsigned int cpu; 4758 struct workqueue_struct *wq;
4759 struct pool_workqueue *pwq;
4760 struct worker_pool *pool;
4761 int pi;
3684 4762
3685 spin_lock(&workqueue_lock); 4763 mutex_lock(&wq_pool_mutex);
3686 4764
3687 if (!workqueue_freezing) 4765 if (!workqueue_freezing)
3688 goto out_unlock; 4766 goto out_unlock;
3689 4767
3690 for_each_wq_cpu(cpu) { 4768 /* clear FREEZING */
3691 struct worker_pool *pool; 4769 for_each_pool(pool, pi) {
3692 struct workqueue_struct *wq; 4770 spin_lock_irq(&pool->lock);
4771 WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
4772 pool->flags &= ~POOL_FREEZING;
4773 spin_unlock_irq(&pool->lock);
4774 }
3693 4775
3694 for_each_std_worker_pool(pool, cpu) { 4776 /* restore max_active and repopulate worklist */
3695 spin_lock_irq(&pool->lock); 4777 list_for_each_entry(wq, &workqueues, list) {
4778 mutex_lock(&wq->mutex);
4779 for_each_pwq(pwq, wq)
4780 pwq_adjust_max_active(pwq);
4781 mutex_unlock(&wq->mutex);
4782 }
4783
4784 workqueue_freezing = false;
4785out_unlock:
4786 mutex_unlock(&wq_pool_mutex);
4787}
4788#endif /* CONFIG_FREEZER */
3696 4789
3697 WARN_ON_ONCE(!(pool->flags & POOL_FREEZING)); 4790static void __init wq_numa_init(void)
3698 pool->flags &= ~POOL_FREEZING; 4791{
4792 cpumask_var_t *tbl;
4793 int node, cpu;
3699 4794
3700 list_for_each_entry(wq, &workqueues, list) { 4795 /* determine NUMA pwq table len - highest node id + 1 */
3701 struct pool_workqueue *pwq = get_pwq(cpu, wq); 4796 for_each_node(node)
4797 wq_numa_tbl_len = max(wq_numa_tbl_len, node + 1);
3702 4798
3703 if (!pwq || pwq->pool != pool || 4799 if (num_possible_nodes() <= 1)
3704 !(wq->flags & WQ_FREEZABLE)) 4800 return;
3705 continue;
3706 4801
3707 /* restore max_active and repopulate worklist */ 4802 if (wq_disable_numa) {
3708 pwq_set_max_active(pwq, wq->saved_max_active); 4803 pr_info("workqueue: NUMA affinity support disabled\n");
3709 } 4804 return;
4805 }
3710 4806
3711 wake_up_worker(pool); 4807 wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
4808 BUG_ON(!wq_update_unbound_numa_attrs_buf);
3712 4809
3713 spin_unlock_irq(&pool->lock); 4810 /*
4811 * We want masks of possible CPUs of each node which isn't readily
4812 * available. Build one from cpu_to_node() which should have been
4813 * fully initialized by now.
4814 */
4815 tbl = kzalloc(wq_numa_tbl_len * sizeof(tbl[0]), GFP_KERNEL);
4816 BUG_ON(!tbl);
4817
4818 for_each_node(node)
4819 BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node));
4820
4821 for_each_possible_cpu(cpu) {
4822 node = cpu_to_node(cpu);
4823 if (WARN_ON(node == NUMA_NO_NODE)) {
4824 pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
4825 /* happens iff arch is bonkers, let's just proceed */
4826 return;
3714 } 4827 }
4828 cpumask_set_cpu(cpu, tbl[node]);
3715 } 4829 }
3716 4830
3717 workqueue_freezing = false; 4831 wq_numa_possible_cpumask = tbl;
3718out_unlock: 4832 wq_numa_enabled = true;
3719 spin_unlock(&workqueue_lock);
3720} 4833}
3721#endif /* CONFIG_FREEZER */
3722 4834
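wq_numa_init() above derives per-node masks of possible CPUs from cpu_to_node(), since no such table is exported. The core loop in isolation; build_node_masks() is a hypothetical helper and @tbl is assumed to hold zeroed, pre-allocated masks indexed by node id:

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    static void build_node_masks(cpumask_var_t *tbl)
    {
            int cpu;

            for_each_possible_cpu(cpu) {
                    int node = cpu_to_node(cpu);

                    if (node != NUMA_NO_NODE)
                            cpumask_set_cpu(cpu, tbl[node]);
            }
    }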
3723static int __init init_workqueues(void) 4835static int __init init_workqueues(void)
3724{ 4836{
3725 unsigned int cpu; 4837 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
4838 int i, cpu;
3726 4839
3727 /* make sure we have enough bits for OFFQ pool ID */ 4840 /* make sure we have enough bits for OFFQ pool ID */
3728 BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) < 4841 BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
3729 WORK_CPU_END * NR_STD_WORKER_POOLS); 4842 WORK_CPU_END * NR_STD_WORKER_POOLS);
3730 4843
4844 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
4845
4846 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
4847
3731 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); 4848 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
3732 hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); 4849 hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
3733 4850
4851 wq_numa_init();
4852
3734 /* initialize CPU pools */ 4853 /* initialize CPU pools */
3735 for_each_wq_cpu(cpu) { 4854 for_each_possible_cpu(cpu) {
3736 struct worker_pool *pool; 4855 struct worker_pool *pool;
3737 4856
3738 for_each_std_worker_pool(pool, cpu) { 4857 i = 0;
3739 spin_lock_init(&pool->lock); 4858 for_each_cpu_worker_pool(pool, cpu) {
4859 BUG_ON(init_worker_pool(pool));
3740 pool->cpu = cpu; 4860 pool->cpu = cpu;
3741 pool->flags |= POOL_DISASSOCIATED; 4861 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
3742 INIT_LIST_HEAD(&pool->worklist); 4862 pool->attrs->nice = std_nice[i++];
3743 INIT_LIST_HEAD(&pool->idle_list); 4863 pool->node = cpu_to_node(cpu);
3744 hash_init(pool->busy_hash);
3745
3746 init_timer_deferrable(&pool->idle_timer);
3747 pool->idle_timer.function = idle_worker_timeout;
3748 pool->idle_timer.data = (unsigned long)pool;
3749
3750 setup_timer(&pool->mayday_timer, pool_mayday_timeout,
3751 (unsigned long)pool);
3752
3753 mutex_init(&pool->assoc_mutex);
3754 ida_init(&pool->worker_ida);
3755 4864
3756 /* alloc pool ID */ 4865 /* alloc pool ID */
4866 mutex_lock(&wq_pool_mutex);
3757 BUG_ON(worker_pool_assign_id(pool)); 4867 BUG_ON(worker_pool_assign_id(pool));
4868 mutex_unlock(&wq_pool_mutex);
3758 } 4869 }
3759 } 4870 }
3760 4871
3761 /* create the initial worker */ 4872 /* create the initial worker */
3762 for_each_online_wq_cpu(cpu) { 4873 for_each_online_cpu(cpu) {
3763 struct worker_pool *pool; 4874 struct worker_pool *pool;
3764 4875
3765 for_each_std_worker_pool(pool, cpu) { 4876 for_each_cpu_worker_pool(pool, cpu) {
3766 struct worker *worker; 4877 pool->flags &= ~POOL_DISASSOCIATED;
4878 BUG_ON(create_and_start_worker(pool) < 0);
4879 }
4880 }
3767 4881
3768 if (cpu != WORK_CPU_UNBOUND) 4882 /* create default unbound wq attrs */
3769 pool->flags &= ~POOL_DISASSOCIATED; 4883 for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
4884 struct workqueue_attrs *attrs;
3770 4885
3771 worker = create_worker(pool); 4886 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
3772 BUG_ON(!worker); 4887 attrs->nice = std_nice[i];
3773 spin_lock_irq(&pool->lock); 4888 unbound_std_wq_attrs[i] = attrs;
3774 start_worker(worker);
3775 spin_unlock_irq(&pool->lock);
3776 }
3777 } 4889 }
3778 4890
3779 system_wq = alloc_workqueue("events", 0, 0); 4891 system_wq = alloc_workqueue("events", 0, 0);
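The default unbound attrs built in init_workqueues() above use the same workqueue_attrs API available to callers: allocate, fill in nice level and cpumask, apply, free. A hedged sketch for an unbound workqueue; retune_unbound() and the chosen values are illustrative only:

    #include <linux/workqueue.h>
    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    static int retune_unbound(struct workqueue_struct *wq)
    {
            struct workqueue_attrs *attrs;
            int ret;

            attrs = alloc_workqueue_attrs(GFP_KERNEL);
            if (!attrs)
                    return -ENOMEM;

            attrs->nice = -5;
            cpumask_copy(attrs->cpumask, cpumask_of(0));
            ret = apply_workqueue_attrs(wq, attrs);  /* wq must be WQ_UNBOUND */
            free_workqueue_attrs(attrs);             /* attrs were copied */
            return ret;
    }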
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 07650264ec15..84ab6e1dc6fb 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -32,14 +32,12 @@ struct worker {
32 struct list_head scheduled; /* L: scheduled works */ 32 struct list_head scheduled; /* L: scheduled works */
33 struct task_struct *task; /* I: worker task */ 33 struct task_struct *task; /* I: worker task */
34 struct worker_pool *pool; /* I: the associated pool */ 34 struct worker_pool *pool; /* I: the associated pool */
35 /* L: for rescuers */
35 /* 64 bytes boundary on 64bit, 32 on 32bit */ 36 /* 64 bytes boundary on 64bit, 32 on 32bit */
36 unsigned long last_active; /* L: last active timestamp */ 37 unsigned long last_active; /* L: last active timestamp */
37 unsigned int flags; /* X: flags */ 38 unsigned int flags; /* X: flags */
38 int id; /* I: worker id */ 39 int id; /* I: worker id */
39 40
40 /* for rebinding worker to CPU */
41 struct work_struct rebind_work; /* L: for busy worker */
42
43 /* used only by rescuers to point to the target workqueue */ 41 /* used only by rescuers to point to the target workqueue */
44 struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */ 42 struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */
45}; 43};
@@ -58,8 +56,7 @@ static inline struct worker *current_wq_worker(void)
58 * Scheduler hooks for concurrency managed workqueue. Only to be used from 56 * Scheduler hooks for concurrency managed workqueue. Only to be used from
59 * sched.c and workqueue.c. 57 * sched.c and workqueue.c.
60 */ 58 */
61void wq_worker_waking_up(struct task_struct *task, unsigned int cpu); 59void wq_worker_waking_up(struct task_struct *task, int cpu);
62struct task_struct *wq_worker_sleeping(struct task_struct *task, 60struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu);
63 unsigned int cpu);
64 61
65#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ 62#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 9681d54b95d1..f8e0e5367398 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/printk.h>
11#include <linux/spinlock.h> 12#include <linux/spinlock.h>
12#include <linux/tty.h> 13#include <linux/tty.h>
13#include <linux/wait.h> 14#include <linux/wait.h>
@@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes)
28 wake_up_klogd(); 29 wake_up_klogd();
29 } 30 }
30} 31}
31
32
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 5e396accd3d0..d87a17a819d0 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref)
862 entry = bucket_find_exact(bucket, ref); 862 entry = bucket_find_exact(bucket, ref);
863 863
864 if (!entry) { 864 if (!entry) {
865 /* must drop lock before calling dma_mapping_error */
866 put_hash_bucket(bucket, &flags);
867
865 if (dma_mapping_error(ref->dev, ref->dev_addr)) { 868 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
866 err_printk(ref->dev, NULL, 869 err_printk(ref->dev, NULL,
867 "DMA-API: device driver tries " 870 "DMA-API: device driver tries to free an "
868 "to free an invalid DMA memory address\n"); 871 "invalid DMA memory address\n");
869 return; 872 } else {
873 err_printk(ref->dev, NULL,
874 "DMA-API: device driver tries to free DMA "
875 "memory it has not allocated [device "
876 "address=0x%016llx] [size=%llu bytes]\n",
877 ref->dev_addr, ref->size);
870 } 878 }
871 err_printk(ref->dev, NULL, "DMA-API: device driver tries " 879 return;
872 "to free DMA memory it has not allocated "
873 "[device address=0x%016llx] [size=%llu bytes]\n",
874 ref->dev_addr, ref->size);
875 goto out;
876 } 880 }
877 881
878 if (ref->size != entry->size) { 882 if (ref->size != entry->size) {
@@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref)
936 hash_bucket_del(entry); 940 hash_bucket_del(entry);
937 dma_entry_free(entry); 941 dma_entry_free(entry);
938 942
939out:
940 put_hash_bucket(bucket, &flags); 943 put_hash_bucket(bucket, &flags);
941} 944}
942 945
@@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1082 ref.dev = dev; 1085 ref.dev = dev;
1083 ref.dev_addr = dma_addr; 1086 ref.dev_addr = dma_addr;
1084 bucket = get_hash_bucket(&ref, &flags); 1087 bucket = get_hash_bucket(&ref, &flags);
1085 entry = bucket_find_exact(bucket, &ref);
1086 1088
1087 if (!entry) 1089 list_for_each_entry(entry, &bucket->list, list) {
1088 goto out; 1090 if (!exact_match(&ref, entry))
1091 continue;
1092
1093 /*
1094 * The same physical address can be mapped multiple
1095 * times. Without a hardware IOMMU this results in the
1096 * same device addresses being put into the dma-debug
1097 * hash multiple times too. This can result in false
1098 * positives being reported. Therefore we implement a
1099 * best-fit algorithm here which updates the first entry
1100 * from the hash which fits the reference value and is
1101 * not currently listed as being checked.
1102 */
1103 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1104 entry->map_err_type = MAP_ERR_CHECKED;
1105 break;
1106 }
1107 }
1089 1108
1090 entry->map_err_type = MAP_ERR_CHECKED;
1091out:
1092 put_hash_bucket(bucket, &flags); 1109 put_hash_bucket(bucket, &flags);
1093} 1110}
1094EXPORT_SYMBOL(debug_dma_mapping_error); 1111EXPORT_SYMBOL(debug_dma_mapping_error);
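The loop above replaces an exact-match lookup with a best-fit scan: the same device address may be hashed several times, so the code claims the first entry not yet marked checked. The pattern reduced to its essentials; struct dbg_entry and mark_first_unchecked() are hypothetical stand-ins:

    #include <linux/list.h>
    #include <linux/types.h>

    struct dbg_entry {
            struct list_head list;
            u64 dev_addr;
            bool checked;
    };

    static void mark_first_unchecked(struct list_head *bucket, u64 dev_addr)
    {
            struct dbg_entry *e;

            list_for_each_entry(e, bucket, list) {
                    if (e->dev_addr != dev_addr || e->checked)
                            continue;
                    e->checked = true;  /* claim the first unchecked match */
                    break;
            }
    }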
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 41733c5dc820..502517492258 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -31,13 +31,14 @@ EXPORT_SYMBOL_GPL(noop_backing_dev_info);
31static struct class *bdi_class; 31static struct class *bdi_class;
32 32
33/* 33/*
34 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as 34 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
35 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
36 * locking. 35 * locking.
37 */ 36 */
38DEFINE_SPINLOCK(bdi_lock); 37DEFINE_SPINLOCK(bdi_lock);
39LIST_HEAD(bdi_list); 38LIST_HEAD(bdi_list);
40LIST_HEAD(bdi_pending_list); 39
40/* bdi_wq serves all asynchronous writeback tasks */
41struct workqueue_struct *bdi_wq;
41 42
42void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2) 43void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
43{ 44{
@@ -257,6 +258,11 @@ static int __init default_bdi_init(void)
257{ 258{
258 int err; 259 int err;
259 260
261 bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
262 WQ_UNBOUND | WQ_SYSFS, 0);
263 if (!bdi_wq)
264 return -ENOMEM;
265
260 err = bdi_init(&default_backing_dev_info); 266 err = bdi_init(&default_backing_dev_info);
261 if (!err) 267 if (!err)
262 bdi_register(&default_backing_dev_info, NULL, "default"); 268 bdi_register(&default_backing_dev_info, NULL, "default");
@@ -271,26 +277,6 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
271 return wb_has_dirty_io(&bdi->wb); 277 return wb_has_dirty_io(&bdi->wb);
272} 278}
273 279
274static void wakeup_timer_fn(unsigned long data)
275{
276 struct backing_dev_info *bdi = (struct backing_dev_info *)data;
277
278 spin_lock_bh(&bdi->wb_lock);
279 if (bdi->wb.task) {
280 trace_writeback_wake_thread(bdi);
281 wake_up_process(bdi->wb.task);
282 } else if (bdi->dev) {
283 /*
284 * When bdi tasks are inactive for long time, they are killed.
285 * In this case we have to wake-up the forker thread which
286 * should create and run the bdi thread.
287 */
288 trace_writeback_wake_forker_thread(bdi);
289 wake_up_process(default_backing_dev_info.wb.task);
290 }
291 spin_unlock_bh(&bdi->wb_lock);
292}
293
294/* 280/*
295 * This function is used when the first inode for this bdi is marked dirty. It 281 * This function is used when the first inode for this bdi is marked dirty. It
296 * wakes-up the corresponding bdi thread which should then take care of the 282 * wakes-up the corresponding bdi thread which should then take care of the
@@ -307,176 +293,7 @@ void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
307 unsigned long timeout; 293 unsigned long timeout;
308 294
309 timeout = msecs_to_jiffies(dirty_writeback_interval * 10); 295 timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
310 mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout); 296 mod_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
311}
312
313/*
314 * Calculate the longest interval (jiffies) bdi threads are allowed to be
315 * inactive.
316 */
317static unsigned long bdi_longest_inactive(void)
318{
319 unsigned long interval;
320
321 interval = msecs_to_jiffies(dirty_writeback_interval * 10);
322 return max(5UL * 60 * HZ, interval);
323}
324
325/*
326 * Clear pending bit and wakeup anybody waiting for flusher thread creation or
327 * shutdown
328 */
329static void bdi_clear_pending(struct backing_dev_info *bdi)
330{
331 clear_bit(BDI_pending, &bdi->state);
332 smp_mb__after_clear_bit();
333 wake_up_bit(&bdi->state, BDI_pending);
334}
335
336static int bdi_forker_thread(void *ptr)
337{
338 struct bdi_writeback *me = ptr;
339
340 current->flags |= PF_SWAPWRITE;
341 set_freezable();
342
343 /*
344 * Our parent may run at a different priority, just set us to normal
345 */
346 set_user_nice(current, 0);
347
348 for (;;) {
349 struct task_struct *task = NULL;
350 struct backing_dev_info *bdi;
351 enum {
352 NO_ACTION, /* Nothing to do */
353 FORK_THREAD, /* Fork bdi thread */
354 KILL_THREAD, /* Kill inactive bdi thread */
355 } action = NO_ACTION;
356
357 /*
358 * Temporary measure, we want to make sure we don't see
359 * dirty data on the default backing_dev_info
360 */
361 if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
362 del_timer(&me->wakeup_timer);
363 wb_do_writeback(me, 0);
364 }
365
366 spin_lock_bh(&bdi_lock);
367 /*
368 * In the following loop we are going to check whether we have
369 * some work to do without any synchronization with tasks
370 * waking us up to do work for them. Set the task state here
371 * so that we don't miss wakeups after verifying conditions.
372 */
373 set_current_state(TASK_INTERRUPTIBLE);
374
375 list_for_each_entry(bdi, &bdi_list, bdi_list) {
376 bool have_dirty_io;
377
378 if (!bdi_cap_writeback_dirty(bdi) ||
379 bdi_cap_flush_forker(bdi))
380 continue;
381
382 WARN(!test_bit(BDI_registered, &bdi->state),
383 "bdi %p/%s is not registered!\n", bdi, bdi->name);
384
385 have_dirty_io = !list_empty(&bdi->work_list) ||
386 wb_has_dirty_io(&bdi->wb);
387
388 /*
389 * If the bdi has work to do, but the thread does not
390 * exist - create it.
391 */
392 if (!bdi->wb.task && have_dirty_io) {
393 /*
394 * Set the pending bit - if someone will try to
395 * unregister this bdi - it'll wait on this bit.
396 */
397 set_bit(BDI_pending, &bdi->state);
398 action = FORK_THREAD;
399 break;
400 }
401
402 spin_lock(&bdi->wb_lock);
403
404 /*
405 * If there is no work to do and the bdi thread was
406 * inactive long enough - kill it. The wb_lock is taken
407 * to make sure no-one adds more work to this bdi and
408 * wakes the bdi thread up.
409 */
410 if (bdi->wb.task && !have_dirty_io &&
411 time_after(jiffies, bdi->wb.last_active +
412 bdi_longest_inactive())) {
413 task = bdi->wb.task;
414 bdi->wb.task = NULL;
415 spin_unlock(&bdi->wb_lock);
416 set_bit(BDI_pending, &bdi->state);
417 action = KILL_THREAD;
418 break;
419 }
420 spin_unlock(&bdi->wb_lock);
421 }
422 spin_unlock_bh(&bdi_lock);
423
424 /* Keep working if default bdi still has things to do */
425 if (!list_empty(&me->bdi->work_list))
426 __set_current_state(TASK_RUNNING);
427
428 switch (action) {
429 case FORK_THREAD:
430 __set_current_state(TASK_RUNNING);
431 task = kthread_create(bdi_writeback_thread, &bdi->wb,
432 "flush-%s", dev_name(bdi->dev));
433 if (IS_ERR(task)) {
434 /*
435 * If thread creation fails, force writeout of
436 * the bdi from the thread. Hopefully 1024 is
437 * large enough for efficient IO.
438 */
439 writeback_inodes_wb(&bdi->wb, 1024,
440 WB_REASON_FORKER_THREAD);
441 } else {
442 /*
443 * The spinlock makes sure we do not lose
444 * wake-ups when racing with 'bdi_queue_work()'.
445 * And as soon as the bdi thread is visible, we
446 * can start it.
447 */
448 spin_lock_bh(&bdi->wb_lock);
449 bdi->wb.task = task;
450 spin_unlock_bh(&bdi->wb_lock);
451 wake_up_process(task);
452 }
453 bdi_clear_pending(bdi);
454 break;
455
456 case KILL_THREAD:
457 __set_current_state(TASK_RUNNING);
458 kthread_stop(task);
459 bdi_clear_pending(bdi);
460 break;
461
462 case NO_ACTION:
463 if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
464 /*
465 * There are no dirty data. The only thing we
466 * should now care about is checking for
467 * inactive bdi threads and killing them. Thus,
468 * let's sleep for longer time, save energy and
469 * be friendly for battery-driven devices.
470 */
471 schedule_timeout(bdi_longest_inactive());
472 else
473 schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
474 try_to_freeze();
475 break;
476 }
477 }
478
479 return 0;
480} 297}
481 298
482/* 299/*
@@ -489,6 +306,9 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi)
489 spin_unlock_bh(&bdi_lock); 306 spin_unlock_bh(&bdi_lock);
490 307
491 synchronize_rcu_expedited(); 308 synchronize_rcu_expedited();
309
310 /* bdi_list is now unused, clear it to mark @bdi dying */
311 INIT_LIST_HEAD(&bdi->bdi_list);
492} 312}
493 313
494int bdi_register(struct backing_dev_info *bdi, struct device *parent, 314int bdi_register(struct backing_dev_info *bdi, struct device *parent,
@@ -508,20 +328,6 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
508 328
509 bdi->dev = dev; 329 bdi->dev = dev;
510 330
511 /*
512 * Just start the forker thread for our default backing_dev_info,
513 * and add other bdi's to the list. They will get a thread created
514 * on-demand when they need it.
515 */
516 if (bdi_cap_flush_forker(bdi)) {
517 struct bdi_writeback *wb = &bdi->wb;
518
519 wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
520 dev_name(dev));
521 if (IS_ERR(wb->task))
522 return PTR_ERR(wb->task);
523 }
524
525 bdi_debug_register(bdi, dev_name(dev)); 331 bdi_debug_register(bdi, dev_name(dev));
526 set_bit(BDI_registered, &bdi->state); 332 set_bit(BDI_registered, &bdi->state);
527 333
@@ -545,8 +351,6 @@ EXPORT_SYMBOL(bdi_register_dev);
545 */ 351 */
546static void bdi_wb_shutdown(struct backing_dev_info *bdi) 352static void bdi_wb_shutdown(struct backing_dev_info *bdi)
547{ 353{
548 struct task_struct *task;
549
550 if (!bdi_cap_writeback_dirty(bdi)) 354 if (!bdi_cap_writeback_dirty(bdi))
551 return; 355 return;
552 356
@@ -556,22 +360,20 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
556 bdi_remove_from_list(bdi); 360 bdi_remove_from_list(bdi);
557 361
558 /* 362 /*
559 * If setup is pending, wait for that to complete first 363 * Drain work list and shutdown the delayed_work. At this point,
364 * @bdi->bdi_list is empty, telling bdi_writeback_workfn() that @bdi
365 * is dying and its work_list needs to be drained no matter what.
560 */ 366 */
561 wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait, 367 mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
562 TASK_UNINTERRUPTIBLE); 368 flush_delayed_work(&bdi->wb.dwork);
369 WARN_ON(!list_empty(&bdi->work_list));
563 370
564 /* 371 /*
565 * Finally, kill the kernel thread. We don't need to be RCU 372 * This shouldn't be necessary unless @bdi for some reason has
566 * safe anymore, since the bdi is gone from visibility. 373 * unflushed dirty IO after work_list is drained. Do it anyway
374 * just in case.
567 */ 375 */
568 spin_lock_bh(&bdi->wb_lock); 376 cancel_delayed_work_sync(&bdi->wb.dwork);
569 task = bdi->wb.task;
570 bdi->wb.task = NULL;
571 spin_unlock_bh(&bdi->wb_lock);
572
573 if (task)
574 kthread_stop(task);
575} 377}
576 378
577/* 379/*
@@ -597,10 +399,8 @@ void bdi_unregister(struct backing_dev_info *bdi)
597 bdi_set_min_ratio(bdi, 0); 399 bdi_set_min_ratio(bdi, 0);
598 trace_writeback_bdi_unregister(bdi); 400 trace_writeback_bdi_unregister(bdi);
599 bdi_prune_sb(bdi); 401 bdi_prune_sb(bdi);
600 del_timer_sync(&bdi->wb.wakeup_timer);
601 402
602 if (!bdi_cap_flush_forker(bdi)) 403 bdi_wb_shutdown(bdi);
603 bdi_wb_shutdown(bdi);
604 bdi_debug_unregister(bdi); 404 bdi_debug_unregister(bdi);
605 405
606 spin_lock_bh(&bdi->wb_lock); 406 spin_lock_bh(&bdi->wb_lock);
@@ -622,7 +422,7 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
622 INIT_LIST_HEAD(&wb->b_io); 422 INIT_LIST_HEAD(&wb->b_io);
623 INIT_LIST_HEAD(&wb->b_more_io); 423 INIT_LIST_HEAD(&wb->b_more_io);
624 spin_lock_init(&wb->list_lock); 424 spin_lock_init(&wb->list_lock);
625 setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi); 425 INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn);
626} 426}
627 427
628/* 428/*
@@ -695,12 +495,11 @@ void bdi_destroy(struct backing_dev_info *bdi)
695 bdi_unregister(bdi); 495 bdi_unregister(bdi);
696 496
697 /* 497 /*
698 * If bdi_unregister() had already been called earlier, the 498 * If bdi_unregister() had already been called earlier, the dwork
699 * wakeup_timer could still be armed because bdi_prune_sb() 499 * could still be pending because bdi_prune_sb() can race with the
700 * can race with the bdi_wakeup_thread_delayed() calls from 500 * bdi_wakeup_thread_delayed() calls from __mark_inode_dirty().
701 * __mark_inode_dirty().
702 */ 501 */
703 del_timer_sync(&bdi->wb.wakeup_timer); 502 cancel_delayed_work_sync(&bdi->wb.dwork);
704 503
705 for (i = 0; i < NR_BDI_STAT_ITEMS; i++) 504 for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
706 percpu_counter_destroy(&bdi->bdi_stat[i]); 505 percpu_counter_destroy(&bdi->bdi_stat[i]);
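The whole forker-thread machinery removed above collapses into one delayed_work per bdi queued on bdi_wq; mod_delayed_work() takes over the re-arming role that mod_timer() played. The conversion pattern in miniature; my_dwork, my_writeback_fn and kick_writeback are hypothetical:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void my_writeback_fn(struct work_struct *work);
    static DECLARE_DELAYED_WORK(my_dwork, my_writeback_fn);

    static void kick_writeback(unsigned long delay_ms)
    {
            /* re-arms like mod_timer(), but lands on a workqueue */
            mod_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(delay_ms));
    }

    static void my_writeback_fn(struct work_struct *work)
    {
            /* one writeback pass; re-queue itself if more is pending */
    }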
diff --git a/mm/fremap.c b/mm/fremap.c
index 4723ac8d2fc2..87da3590c61e 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -204,10 +204,8 @@ get_write_lock:
204 unsigned long addr; 204 unsigned long addr;
205 struct file *file = get_file(vma->vm_file); 205 struct file *file = get_file(vma->vm_file);
206 206
207 vm_flags = vma->vm_flags; 207 addr = mmap_region(file, start, size,
208 if (!(flags & MAP_NONBLOCK)) 208 vma->vm_flags, pgoff);
209 vm_flags |= VM_POPULATE;
210 addr = mmap_region(file, start, size, vm_flags, pgoff);
211 fput(file); 209 fput(file);
212 if (IS_ERR_VALUE(addr)) { 210 if (IS_ERR_VALUE(addr)) {
213 err = addr; 211 err = addr;
@@ -226,12 +224,6 @@ get_write_lock:
226 mutex_unlock(&mapping->i_mmap_mutex); 224 mutex_unlock(&mapping->i_mmap_mutex);
227 } 225 }
228 226
229 if (!(flags & MAP_NONBLOCK) && !(vma->vm_flags & VM_POPULATE)) {
230 if (!has_write_lock)
231 goto get_write_lock;
232 vma->vm_flags |= VM_POPULATE;
233 }
234
235 if (vma->vm_flags & VM_LOCKED) { 227 if (vma->vm_flags & VM_LOCKED) {
236 /* 228 /*
237 * drop PG_Mlocked flag for over-mapped range 229 * drop PG_Mlocked flag for over-mapped range
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0a0be33bb199..ca9a7c6d7e97 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2124,8 +2124,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
2124/* Return the number pages of memory we physically have, in PAGE_SIZE units. */ 2124/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
2125unsigned long hugetlb_total_pages(void) 2125unsigned long hugetlb_total_pages(void)
2126{ 2126{
2127 struct hstate *h = &default_hstate; 2127 struct hstate *h;
2128 return h->nr_huge_pages * pages_per_huge_page(h); 2128 unsigned long nr_total_pages = 0;
2129
2130 for_each_hstate(h)
2131 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2132 return nr_total_pages;
2129} 2133}
2130 2134
2131static int hugetlb_acct_memory(struct hstate *h, long delta) 2135static int hugetlb_acct_memory(struct hstate *h, long delta)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9597eec8239d..ee3765760818 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1779,7 +1779,11 @@ void try_offline_node(int nid)
1779 for (i = 0; i < MAX_NR_ZONES; i++) { 1779 for (i = 0; i < MAX_NR_ZONES; i++) {
1780 struct zone *zone = pgdat->node_zones + i; 1780 struct zone *zone = pgdat->node_zones + i;
1781 1781
1782 if (zone->wait_table) 1782 /*
1783 * wait_table may be allocated from boot memory,
1784 * here only free if it's allocated by vmalloc.
1785 */
1786 if (is_vmalloc_addr(zone->wait_table))
1783 vfree(zone->wait_table); 1787 vfree(zone->wait_table);
1784 } 1788 }
1785 1789
diff --git a/mm/mlock.c b/mm/mlock.c
index 1c5e33fce639..79b7cf7d1bca 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -358,7 +358,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
358 358
359 newflags = vma->vm_flags & ~VM_LOCKED; 359 newflags = vma->vm_flags & ~VM_LOCKED;
360 if (on) 360 if (on)
361 newflags |= VM_LOCKED | VM_POPULATE; 361 newflags |= VM_LOCKED;
362 362
363 tmp = vma->vm_end; 363 tmp = vma->vm_end;
364 if (tmp > end) 364 if (tmp > end)
@@ -418,8 +418,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
418 * range with the first VMA. Also, skip undesirable VMA types. 418 * range with the first VMA. Also, skip undesirable VMA types.
419 */ 419 */
420 nend = min(end, vma->vm_end); 420 nend = min(end, vma->vm_end);
421 if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) != 421 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
422 VM_POPULATE)
423 continue; 422 continue;
424 if (nstart < vma->vm_start) 423 if (nstart < vma->vm_start)
425 nstart = vma->vm_start; 424 nstart = vma->vm_start;
@@ -492,9 +491,9 @@ static int do_mlockall(int flags)
492 struct vm_area_struct * vma, * prev = NULL; 491 struct vm_area_struct * vma, * prev = NULL;
493 492
494 if (flags & MCL_FUTURE) 493 if (flags & MCL_FUTURE)
495 current->mm->def_flags |= VM_LOCKED | VM_POPULATE; 494 current->mm->def_flags |= VM_LOCKED;
496 else 495 else
497 current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE); 496 current->mm->def_flags &= ~VM_LOCKED;
498 if (flags == MCL_FUTURE) 497 if (flags == MCL_FUTURE)
499 goto out; 498 goto out;
500 499
@@ -503,7 +502,7 @@ static int do_mlockall(int flags)
503 502
504 newflags = vma->vm_flags & ~VM_LOCKED; 503 newflags = vma->vm_flags & ~VM_LOCKED;
505 if (flags & MCL_CURRENT) 504 if (flags & MCL_CURRENT)
506 newflags |= VM_LOCKED | VM_POPULATE; 505 newflags |= VM_LOCKED;
507 506
508 /* Ignore errors */ 507 /* Ignore errors */
509 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); 508 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
diff --git a/mm/mmap.c b/mm/mmap.c
index 2664a47cec93..6466699b16cb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1306,7 +1306,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1306 } 1306 }
1307 1307
1308 addr = mmap_region(file, addr, len, vm_flags, pgoff); 1308 addr = mmap_region(file, addr, len, vm_flags, pgoff);
1309 if (!IS_ERR_VALUE(addr) && (vm_flags & VM_POPULATE)) 1309 if (!IS_ERR_VALUE(addr) &&
1310 ((vm_flags & VM_LOCKED) ||
1311 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
1310 *populate = len; 1312 *populate = len;
1311 return addr; 1313 return addr;
1312} 1314}
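
The new condition spells out when do_mmap_pgoff() asks its caller to prefault the range: either the mapping is mlocked, or userspace passed MAP_POPULATE without MAP_NONBLOCK. A boolean model (flag values are x86-style illustrations, not guaranteed for every arch):

    #include <stdbool.h>
    #include <stdio.h>

    #define VM_LOCKED_M    0x00002000UL
    #define MAP_POPULATE_M 0x008000UL
    #define MAP_NONBLOCK_M 0x010000UL

    static bool should_populate(unsigned long vm_flags, unsigned long flags)
    {
            return (vm_flags & VM_LOCKED_M) ||
                   (flags & (MAP_POPULATE_M | MAP_NONBLOCK_M)) == MAP_POPULATE_M;
    }

    int main(void)
    {
            printf("%d\n", should_populate(0, MAP_POPULATE_M));                  /* 1 */
            printf("%d\n", should_populate(0, MAP_POPULATE_M | MAP_NONBLOCK_M)); /* 0 */
            return 0;
    }
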
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a18714469bf7..85addcd9372b 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -86,13 +86,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
86 86
87 grp = &vlan_info->grp; 87 grp = &vlan_info->grp;
88 88
89 /* Take it out of our own structures, but be sure to interlock with
90 * HW accelerating devices or SW vlan input packet processing if
91 * VLAN is not 0 (leave it there for 802.1p).
92 */
93 if (vlan_id)
94 vlan_vid_del(real_dev, vlan_id);
95
96 grp->nr_vlan_devs--; 89 grp->nr_vlan_devs--;
97 90
98 if (vlan->flags & VLAN_FLAG_MVRP) 91 if (vlan->flags & VLAN_FLAG_MVRP)
@@ -114,6 +107,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
114 vlan_gvrp_uninit_applicant(real_dev); 107 vlan_gvrp_uninit_applicant(real_dev);
115 } 108 }
116 109
110 /* Take it out of our own structures, but be sure to interlock with
111 * HW accelerating devices or SW vlan input packet processing if
112 * VLAN is not 0 (leave it there for 802.1p).
113 */
114 if (vlan_id)
115 vlan_vid_del(real_dev, vlan_id);
116
117 /* Get rid of the vlan's reference to real_dev */ 117 /* Get rid of the vlan's reference to real_dev */
118 dev_put(real_dev); 118 dev_put(real_dev);
119} 119}
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index a0b253ecadaf..a5bb0a769eb9 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1288,7 +1288,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
1288 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff; 1288 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
1289 1289
1290 /* unpack the aggregated packets and process them one by one */ 1290 /* unpack the aggregated packets and process them one by one */
1291 do { 1291 while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
1292 batadv_ogm_packet->tt_num_changes)) {
1292 tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN; 1293 tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
1293 1294
1294 batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff, 1295 batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff,
@@ -1299,8 +1300,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
1299 1300
1300 packet_pos = packet_buff + buff_pos; 1301 packet_pos = packet_buff + buff_pos;
1301 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; 1302 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
1302 } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, 1303 }
1303 batadv_ogm_packet->tt_num_changes));
1304 1304
1305 kfree_skb(skb); 1305 kfree_skb(skb);
1306 return NET_RX_SUCCESS; 1306 return NET_RX_SUCCESS;
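
Rewriting the do/while as a while moves the aggregate-bounds check in front of the first iteration, so a too-short or malformed first OGM is rejected instead of processed. The control-flow difference in miniature (function names are stand-ins):

    #include <stdio.h>

    static int aggr_packet_ok(int pos, int len) { return pos + 4 <= len; }

    int main(void)
    {
            int packet_len = 0; /* an empty buffer */
            int buff_pos = 0;

            /* guard first: the body never runs on invalid input */
            while (aggr_packet_ok(buff_pos, packet_len)) {
                    printf("process packet at %d\n", buff_pos);
                    buff_pos += 4;
            }
            /* a do { ... } while (...) would have touched offset 0 anyway */
            return 0;
    }
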
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 79d87d8d4f51..fad0302bdb32 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -359,6 +359,7 @@ static void __sco_sock_close(struct sock *sk)
359 sco_chan_del(sk, ECONNRESET); 359 sco_chan_del(sk, ECONNRESET);
360 break; 360 break;
361 361
362 case BT_CONNECT2:
362 case BT_CONNECT: 363 case BT_CONNECT:
363 case BT_DISCONN: 364 case BT_DISCONN:
364 sco_chan_del(sk, ECONNRESET); 365 sco_chan_del(sk, ECONNRESET);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index b0812c91c0f0..bab338e6270d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -423,7 +423,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
423 return 0; 423 return 0;
424 br_warn(br, "adding interface %s with same address " 424 br_warn(br, "adding interface %s with same address "
425 "as a received packet\n", 425 "as a received packet\n",
426 source->dev->name); 426 source ? source->dev->name : br->dev->name);
427 fdb_delete(br, fdb); 427 fdb_delete(br, fdb);
428 } 428 }
429 429
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 27aa3ee517ce..299fc5f40a26 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -29,6 +29,7 @@ static inline size_t br_port_info_size(void)
29 + nla_total_size(1) /* IFLA_BRPORT_MODE */ 29 + nla_total_size(1) /* IFLA_BRPORT_MODE */
30 + nla_total_size(1) /* IFLA_BRPORT_GUARD */ 30 + nla_total_size(1) /* IFLA_BRPORT_GUARD */
31 + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ 31 + nla_total_size(1) /* IFLA_BRPORT_PROTECT */
32 + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
32 + 0; 33 + 0;
33} 34}
34 35
@@ -329,6 +330,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
329 br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); 330 br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
330 br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); 331 br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
331 br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); 332 br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
333 br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
332 334
333 if (tb[IFLA_BRPORT_COST]) { 335 if (tb[IFLA_BRPORT_COST]) {
334 err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); 336 err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
diff --git a/net/core/dev.c b/net/core/dev.c
index dffbef70cd31..b13e5c766c11 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1545,7 +1545,6 @@ void net_enable_timestamp(void)
1545 return; 1545 return;
1546 } 1546 }
1547#endif 1547#endif
1548 WARN_ON(in_interrupt());
1549 static_key_slow_inc(&netstamp_needed); 1548 static_key_slow_inc(&netstamp_needed);
1550} 1549}
1551EXPORT_SYMBOL(net_enable_timestamp); 1550EXPORT_SYMBOL(net_enable_timestamp);
@@ -2219,9 +2218,9 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2219 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 2218 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2220 struct packet_offload *ptype; 2219 struct packet_offload *ptype;
2221 __be16 type = skb->protocol; 2220 __be16 type = skb->protocol;
2221 int vlan_depth = ETH_HLEN;
2222 2222
2223 while (type == htons(ETH_P_8021Q)) { 2223 while (type == htons(ETH_P_8021Q)) {
2224 int vlan_depth = ETH_HLEN;
2225 struct vlan_hdr *vh; 2224 struct vlan_hdr *vh;
2226 2225
2227 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) 2226 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
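
The vlan_depth change is a classic scoping bug: declaring the offset inside the loop re-initialized it to ETH_HLEN on every pass, so the second of two stacked 802.1Q headers was read at the first header's offset. Hoisted out of the loop, the offset accumulates. Stripped down to the arithmetic:

    #include <stdio.h>

    #define ETH_HLEN_M  14
    #define VLAN_HLEN_M 4

    int main(void)
    {
            int vlan_depth = ETH_HLEN_M; /* hoisted, as in the fix */

            for (int tag = 0; tag < 2; tag++) {   /* QinQ: two stacked tags */
                    printf("VLAN header at offset %d\n", vlan_depth);
                    vlan_depth += VLAN_HLEN_M;    /* 14, then 18 */
            }
            return 0;
    }
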
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 9d4c7201400d..e187bf06d673 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -140,6 +140,8 @@ ipv6:
140 flow->ports = *ports; 140 flow->ports = *ports;
141 } 141 }
142 142
143 flow->thoff = (u16) nhoff;
144
143 return true; 145 return true;
144} 146}
145EXPORT_SYMBOL(skb_flow_dissect); 147EXPORT_SYMBOL(skb_flow_dissect);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a585d45cc9d9..5fb8d7e47294 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2621,7 +2621,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2621 struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); 2621 struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
2622 2622
2623 while (RTA_OK(attr, attrlen)) { 2623 while (RTA_OK(attr, attrlen)) {
2624 unsigned int flavor = attr->rta_type; 2624 unsigned int flavor = attr->rta_type & NLA_TYPE_MASK;
2625 if (flavor) { 2625 if (flavor) {
2626 if (flavor > rta_max[sz_idx]) 2626 if (flavor > rta_max[sz_idx])
2627 return -EINVAL; 2627 return -EINVAL;
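
rta_type carries flag bits alongside the attribute number, so comparing the raw value against rta_max could reject or mis-index valid nested or byte-order-flagged attributes. A model using the netlink flag layout of this era (values as in include/net/netlink.h):

    #include <stdio.h>

    #define NLA_F_NESTED_M        (1u << 15)
    #define NLA_F_NET_BYTEORDER_M (1u << 14)
    #define NLA_TYPE_MASK_M       (~(NLA_F_NESTED_M | NLA_F_NET_BYTEORDER_M))

    int main(void)
    {
            unsigned int rta_type = NLA_F_NESTED_M | 3; /* nested attr, type 3 */
            unsigned int flavor = rta_type & NLA_TYPE_MASK_M;

            printf("raw %#x -> type %u\n", rta_type, flavor); /* type 3 */
            return 0;
    }
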
diff --git a/net/core/scm.c b/net/core/scm.c
index 905dcc6ad1e3..2dc6cdaaae8a 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -24,6 +24,7 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#include <linux/security.h> 26#include <linux/security.h>
27#include <linux/pid_namespace.h>
27#include <linux/pid.h> 28#include <linux/pid.h>
28#include <linux/nsproxy.h> 29#include <linux/nsproxy.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
@@ -52,7 +53,8 @@ static __inline__ int scm_check_creds(struct ucred *creds)
52 if (!uid_valid(uid) || !gid_valid(gid)) 53 if (!uid_valid(uid) || !gid_valid(gid))
53 return -EINVAL; 54 return -EINVAL;
54 55
55 if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) && 56 if ((creds->pid == task_tgid_vnr(current) ||
57 ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
56 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || 58 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
57 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && 59 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
58 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || 60 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 68f6a94f7661..c929d9c1c4b6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1333,8 +1333,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1333 iph->frag_off |= htons(IP_MF); 1333 iph->frag_off |= htons(IP_MF);
1334 offset += (skb->len - skb->mac_len - iph->ihl * 4); 1334 offset += (skb->len - skb->mac_len - iph->ihl * 4);
1335 } else { 1335 } else {
1336 if (!(iph->frag_off & htons(IP_DF))) 1336 iph->id = htons(id++);
1337 iph->id = htons(id++);
1338 } 1337 }
1339 iph->tot_len = htons(skb->len - skb->mac_len); 1338 iph->tot_len = htons(skb->len - skb->mac_len);
1340 iph->check = 0; 1339 iph->check = 0;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 245ae078a07f..f4fd23de9b13 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -21,6 +21,7 @@
21#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23 23
24#include <net/sock.h>
24#include <net/inet_frag.h> 25#include <net/inet_frag.h>
25 26
26static void inet_frag_secret_rebuild(unsigned long dummy) 27static void inet_frag_secret_rebuild(unsigned long dummy)
@@ -277,6 +278,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
277 __releases(&f->lock) 278 __releases(&f->lock)
278{ 279{
279 struct inet_frag_queue *q; 280 struct inet_frag_queue *q;
281 int depth = 0;
280 282
281 hlist_for_each_entry(q, &f->hash[hash], list) { 283 hlist_for_each_entry(q, &f->hash[hash], list) {
282 if (q->net == nf && f->match(q, key)) { 284 if (q->net == nf && f->match(q, key)) {
@@ -284,9 +286,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
284 read_unlock(&f->lock); 286 read_unlock(&f->lock);
285 return q; 287 return q;
286 } 288 }
289 depth++;
287 } 290 }
288 read_unlock(&f->lock); 291 read_unlock(&f->lock);
289 292
290 return inet_frag_create(nf, f, key); 293 if (depth <= INETFRAGS_MAXDEPTH)
294 return inet_frag_create(nf, f, key);
295 else
296 return ERR_PTR(-ENOBUFS);
291} 297}
292EXPORT_SYMBOL(inet_frag_find); 298EXPORT_SYMBOL(inet_frag_find);
299
300void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
301 const char *prefix)
302{
303 static const char msg[] = "inet_frag_find: Fragment hash bucket"
304 " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
305 ". Dropping fragment.\n";
306
307 if (PTR_ERR(q) == -ENOBUFS)
308 LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
309}
310EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
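
The depth counter turns an attacker-controllable hash chain into a bounded one: once a bucket holds more than INETFRAGS_MAXDEPTH entries (128 in this series), the lookup refuses to create new queues and callers drop the fragment with a rate-limited warning instead. A model of the bounded bucket walk:

    #include <stddef.h>
    #include <stdio.h>

    #define MAXDEPTH_M 128

    struct fq { int key; struct fq *next; };

    /* returns the match; on a miss, *create says whether the chain is short
     * enough to justify allocating a new queue */
    static struct fq *bounded_find(struct fq *bucket, int key, int *create)
    {
            int depth = 0;

            for (struct fq *q = bucket; q; q = q->next) {
                    if (q->key == key)
                            return q;
                    depth++;
            }
            *create = (depth <= MAXDEPTH_M);
            return NULL;
    }

    int main(void)
    {
            int create;

            bounded_find(NULL, 42, &create);
            printf("miss on empty bucket -> create=%d\n", create); /* 1 */
            return 0;
    }
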
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b6d30acb600c..a6445b843ef4 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -292,14 +292,11 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
292 hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); 292 hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
293 293
294 q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); 294 q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
295 if (q == NULL) 295 if (IS_ERR_OR_NULL(q)) {
296 goto out_nomem; 296 inet_frag_maybe_warn_overflow(q, pr_fmt());
297 297 return NULL;
298 }
298 return container_of(q, struct ipq, q); 299 return container_of(q, struct ipq, q);
299
300out_nomem:
301 LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
302 return NULL;
303} 300}
304 301
305/* Is the fragment too far ahead to be part of ipq? */ 302/* Is the fragment too far ahead to be part of ipq? */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d0ef0e674ec5..91d66dbde9c0 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -798,10 +798,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
798 798
799 if (dev->header_ops && dev->type == ARPHRD_IPGRE) { 799 if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
800 gre_hlen = 0; 800 gre_hlen = 0;
801 if (skb->protocol == htons(ETH_P_IP)) 801 tiph = (const struct iphdr *)skb->data;
802 tiph = (const struct iphdr *)skb->data;
803 else
804 tiph = &tunnel->parms.iph;
805 } else { 802 } else {
806 gre_hlen = tunnel->hlen; 803 gre_hlen = tunnel->hlen;
807 tiph = &tunnel->parms.iph; 804 tiph = &tunnel->parms.iph;
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 310a3647c83d..ec7264514a82 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -370,7 +370,6 @@ int ip_options_compile(struct net *net,
370 } 370 }
371 switch (optptr[3]&0xF) { 371 switch (optptr[3]&0xF) {
372 case IPOPT_TS_TSONLY: 372 case IPOPT_TS_TSONLY:
373 opt->ts = optptr - iph;
374 if (skb) 373 if (skb)
375 timeptr = &optptr[optptr[2]-1]; 374 timeptr = &optptr[optptr[2]-1];
376 opt->ts_needtime = 1; 375 opt->ts_needtime = 1;
@@ -381,7 +380,6 @@ int ip_options_compile(struct net *net,
381 pp_ptr = optptr + 2; 380 pp_ptr = optptr + 2;
382 goto error; 381 goto error;
383 } 382 }
384 opt->ts = optptr - iph;
385 if (rt) { 383 if (rt) {
386 spec_dst_fill(&spec_dst, skb); 384 spec_dst_fill(&spec_dst, skb);
387 memcpy(&optptr[optptr[2]-1], &spec_dst, 4); 385 memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
@@ -396,7 +394,6 @@ int ip_options_compile(struct net *net,
396 pp_ptr = optptr + 2; 394 pp_ptr = optptr + 2;
397 goto error; 395 goto error;
398 } 396 }
399 opt->ts = optptr - iph;
400 { 397 {
401 __be32 addr; 398 __be32 addr;
402 memcpy(&addr, &optptr[optptr[2]-1], 4); 399 memcpy(&addr, &optptr[optptr[2]-1], 4);
@@ -429,12 +426,12 @@ int ip_options_compile(struct net *net,
429 pp_ptr = optptr + 3; 426 pp_ptr = optptr + 3;
430 goto error; 427 goto error;
431 } 428 }
432 opt->ts = optptr - iph;
433 if (skb) { 429 if (skb) {
434 optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4); 430 optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
435 opt->is_changed = 1; 431 opt->is_changed = 1;
436 } 432 }
437 } 433 }
434 opt->ts = optptr - iph;
438 break; 435 break;
439 case IPOPT_RA: 436 case IPOPT_RA:
440 if (optlen < 4) { 437 if (optlen < 4) {
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 98cbc6877019..bf6c5cf31aed 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1522,7 +1522,8 @@ static int __init ip_auto_config(void)
1522 } 1522 }
1523 for (i++; i < CONF_NAMESERVERS_MAX; i++) 1523 for (i++; i < CONF_NAMESERVERS_MAX; i++)
1524 if (ic_nameservers[i] != NONE) 1524 if (ic_nameservers[i] != NONE)
1525 pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]); 1525 pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]);
1526 pr_cont("\n");
1526#endif /* !SILENT */ 1527#endif /* !SILENT */
1527 1528
1528 return 0; 1529 return 0;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index ce2d43e1f09f..0d755c50994b 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -36,19 +36,6 @@ config NF_CONNTRACK_PROC_COMPAT
36 36
37 If unsure, say Y. 37 If unsure, say Y.
38 38
39config IP_NF_QUEUE
40 tristate "IP Userspace queueing via NETLINK (OBSOLETE)"
41 depends on NETFILTER_ADVANCED
42 help
43 Netfilter has the ability to queue packets to user space: the
44 netlink device can be used to access them using this driver.
45
46 This option enables the old IPv4-only "ip_queue" implementation
47 which has been obsoleted by the new "nfnetlink_queue" code (see
48 CONFIG_NETFILTER_NETLINK_QUEUE).
49
50 To compile it as a module, choose M here. If unsure, say N.
51
52config IP_NF_IPTABLES 39config IP_NF_IPTABLES
53 tristate "IP tables support (required for filtering/masq/NAT)" 40 tristate "IP tables support (required for filtering/masq/NAT)"
54 default m if NETFILTER_ADVANCED=n 41 default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 47e854fcae24..e22020790709 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -775,7 +775,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
775 * Make sure that we have exactly size bytes 775 * Make sure that we have exactly size bytes
776 * available to the caller, no more, no less. 776 * available to the caller, no more, no less.
777 */ 777 */
778 skb->avail_size = size; 778 skb->reserved_tailroom = skb->end - skb->tail - size;
779 return skb; 779 return skb;
780 } 780 }
781 __kfree_skb(skb); 781 __kfree_skb(skb);
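
Rather than storing the caller-visible size in a dedicated avail_size field, the skb now records how much tailroom is off-limits: reserved_tailroom = end - tail - size, so the tailroom remaining above the reservation is exactly the size promised, and a struct sk_buff field is freed up. The arithmetic, isolated (skb geometry values are made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned int end = 2048, tail = 256, size = 1200;
            unsigned int reserved_tailroom = end - tail - size;
            unsigned int avail = (end - tail) - reserved_tailroom;

            printf("reserved=%u avail=%u\n", reserved_tailroom, avail); /* avail == size */
            return 0;
    }
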
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0d9bdacce99f..3bd55bad230a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2059,11 +2059,8 @@ void tcp_enter_loss(struct sock *sk, int how)
2059 if (tcp_is_reno(tp)) 2059 if (tcp_is_reno(tp))
2060 tcp_reset_reno_sack(tp); 2060 tcp_reset_reno_sack(tp);
2061 2061
2062 if (!how) { 2062 tp->undo_marker = tp->snd_una;
2063 /* Push undo marker, if it was plain RTO and nothing 2063 if (how) {
2064 * was retransmitted. */
2065 tp->undo_marker = tp->snd_una;
2066 } else {
2067 tp->sacked_out = 0; 2064 tp->sacked_out = 0;
2068 tp->fackets_out = 0; 2065 tp->fackets_out = 0;
2069 } 2066 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 4a8ec457310f..d09203c63264 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -274,13 +274,6 @@ static void tcp_v4_mtu_reduced(struct sock *sk)
274 struct inet_sock *inet = inet_sk(sk); 274 struct inet_sock *inet = inet_sk(sk);
275 u32 mtu = tcp_sk(sk)->mtu_info; 275 u32 mtu = tcp_sk(sk)->mtu_info;
276 276
277 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
278 * sent out by Linux are always <576 bytes so they should go through
279 * unfragmented).
280 */
281 if (sk->sk_state == TCP_LISTEN)
282 return;
283
284 dst = inet_csk_update_pmtu(sk, mtu); 277 dst = inet_csk_update_pmtu(sk, mtu);
285 if (!dst) 278 if (!dst)
286 return; 279 return;
@@ -408,6 +401,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
408 goto out; 401 goto out;
409 402
410 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ 403 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
404 /* We are not interested in TCP_LISTEN and open_requests
405 * (SYN-ACKs sent out by Linux are always <576 bytes so
406 * they should go through unfragmented).
407 */
408 if (sk->sk_state == TCP_LISTEN)
409 goto out;
410
411 tp->mtu_info = info; 411 tp->mtu_info = info;
412 if (!sock_owned_by_user(sk)) { 412 if (!sock_owned_by_user(sk)) {
413 tcp_v4_mtu_reduced(sk); 413 tcp_v4_mtu_reduced(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e2b4461074da..5d0b4387cba6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1298,7 +1298,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
1298 eat = min_t(int, len, skb_headlen(skb)); 1298 eat = min_t(int, len, skb_headlen(skb));
1299 if (eat) { 1299 if (eat) {
1300 __skb_pull(skb, eat); 1300 __skb_pull(skb, eat);
1301 skb->avail_size -= eat;
1302 len -= eat; 1301 len -= eat;
1303 if (!len) 1302 if (!len)
1304 return; 1303 return;
@@ -1810,8 +1809,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1810 goto send_now; 1809 goto send_now;
1811 } 1810 }
1812 1811
1813 /* Ok, it looks like it is advisable to defer. */ 1812 /* Ok, it looks like it is advisable to defer.
1814 tp->tso_deferred = 1 | (jiffies << 1); 1813 * Do not rearm the timer if already set to not break TCP ACK clocking.
1814 */
1815 if (!tp->tso_deferred)
1816 tp->tso_deferred = 1 | (jiffies << 1);
1815 1817
1816 return true; 1818 return true;
1817 1819
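
tso_deferred packs a busy bit and a timestamp into one word: bit 0 says a deferral is in flight, the upper bits hold jiffies. Re-arming it on every pass through tcp_tso_should_defer() kept pushing the deadline out and could stall the ACK clock; the fix arms it only when currently clear. The encoding in isolation:

    #include <stdio.h>

    int main(void)
    {
            unsigned long jiffies = 100000UL;
            unsigned long tso_deferred = 0;

            if (!tso_deferred)                   /* arm once, as in the fix */
                    tso_deferred = 1UL | (jiffies << 1);

            printf("busy=%lu stamp=%lu\n",
                   tso_deferred & 1UL, tso_deferred >> 1);
            return 0;
    }
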
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 265c42cf963c..0a073a263720 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1762,9 +1762,16 @@ int udp_rcv(struct sk_buff *skb)
1762 1762
1763void udp_destroy_sock(struct sock *sk) 1763void udp_destroy_sock(struct sock *sk)
1764{ 1764{
1765 struct udp_sock *up = udp_sk(sk);
1765 bool slow = lock_sock_fast(sk); 1766 bool slow = lock_sock_fast(sk);
1766 udp_flush_pending_frames(sk); 1767 udp_flush_pending_frames(sk);
1767 unlock_sock_fast(sk, slow); 1768 unlock_sock_fast(sk, slow);
1769 if (static_key_false(&udp_encap_needed) && up->encap_type) {
1770 void (*encap_destroy)(struct sock *sk);
1771 encap_destroy = ACCESS_ONCE(up->encap_destroy);
1772 if (encap_destroy)
1773 encap_destroy(sk);
1774 }
1768} 1775}
1769 1776
1770/* 1777/*
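
udp_destroy_sock() now consults the new encap_destroy hook so protocols layered on UDP encapsulation get a chance to tear down their state when the socket dies (the L2TP hunks later in this merge install such a hook). A minimal model of the pattern, with stand-in struct and names; reading the pointer once mirrors the ACCESS_ONCE() in the hunk, which guards against the hook being cleared concurrently:

    #include <stdio.h>

    struct sock_model;

    struct udp_sock_model {
            int encap_type;                               /* nonzero if in use */
            void (*encap_destroy)(struct sock_model *sk); /* may be NULL */
    };

    static void tunnel_destroy(struct sock_model *sk)
    {
            (void)sk;
            puts("tunnel state torn down");
    }

    int main(void)
    {
            struct udp_sock_model up = { 1, tunnel_destroy };
            void (*destroy)(struct sock_model *) = up.encap_destroy; /* read once */

            if (up.encap_type && destroy)
                    destroy(NULL);
            return 0;
    }
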
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f2c7e615f902..26512250e095 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4784,26 +4784,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
4784 4784
4785static int __net_init addrconf_init_net(struct net *net) 4785static int __net_init addrconf_init_net(struct net *net)
4786{ 4786{
4787 int err; 4787 int err = -ENOMEM;
4788 struct ipv6_devconf *all, *dflt; 4788 struct ipv6_devconf *all, *dflt;
4789 4789
4790 err = -ENOMEM; 4790 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
4791 all = &ipv6_devconf; 4791 if (all == NULL)
4792 dflt = &ipv6_devconf_dflt; 4792 goto err_alloc_all;
4793 4793
4794 if (!net_eq(net, &init_net)) { 4794 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
4795 all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL); 4795 if (dflt == NULL)
4796 if (all == NULL) 4796 goto err_alloc_dflt;
4797 goto err_alloc_all;
4798 4797
4799 dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); 4798 /* these will be inherited by all namespaces */
4800 if (dflt == NULL) 4799 dflt->autoconf = ipv6_defaults.autoconf;
4801 goto err_alloc_dflt; 4800 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
4802 } else {
4803 /* these will be inherited by all namespaces */
4804 dflt->autoconf = ipv6_defaults.autoconf;
4805 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
4806 }
4807 4801
4808 net->ipv6.devconf_all = all; 4802 net->ipv6.devconf_all = all;
4809 net->ipv6.devconf_dflt = dflt; 4803 net->ipv6.devconf_dflt = dflt;
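
Giving init_net its own kmemdup() copies instead of pointing it at the static templates removes the namespace special case and makes setup and teardown uniform; allocation failures unwind through labels in reverse order. The error-handling skeleton, modeled in plain C with hypothetical names:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct devconf_model { int autoconf, disable; };

    static void *kmemdup_model(const void *src, size_t len)
    {
            void *p = malloc(len);

            if (p)
                    memcpy(p, src, len);
            return p;
    }

    static int init_net_model(const struct devconf_model *tmpl,
                              struct devconf_model **all,
                              struct devconf_model **dflt)
    {
            *all = kmemdup_model(tmpl, sizeof(*tmpl));
            if (!*all)
                    goto err_alloc_all;

            *dflt = kmemdup_model(tmpl, sizeof(*tmpl));
            if (!*dflt)
                    goto err_alloc_dflt;
            return 0;

    err_alloc_dflt:
            free(*all);
    err_alloc_all:
            return -ENOMEM;
    }

    int main(void)
    {
            struct devconf_model tmpl = { 1, 0 }, *all, *dflt;

            return init_net_model(&tmpl, &all, &dflt);
    }
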
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
index 83acc1405a18..33608c610276 100644
--- a/net/ipv6/netfilter/ip6t_NPT.c
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -114,6 +114,7 @@ ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
114static struct xt_target ip6t_npt_target_reg[] __read_mostly = { 114static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
115 { 115 {
116 .name = "SNPT", 116 .name = "SNPT",
117 .table = "mangle",
117 .target = ip6t_snpt_tg, 118 .target = ip6t_snpt_tg,
118 .targetsize = sizeof(struct ip6t_npt_tginfo), 119 .targetsize = sizeof(struct ip6t_npt_tginfo),
119 .checkentry = ip6t_npt_checkentry, 120 .checkentry = ip6t_npt_checkentry,
@@ -124,6 +125,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
124 }, 125 },
125 { 126 {
126 .name = "DNPT", 127 .name = "DNPT",
128 .table = "mangle",
127 .target = ip6t_dnpt_tg, 129 .target = ip6t_dnpt_tg,
128 .targetsize = sizeof(struct ip6t_npt_tginfo), 130 .targetsize = sizeof(struct ip6t_npt_tginfo),
129 .checkentry = ip6t_npt_checkentry, 131 .checkentry = ip6t_npt_checkentry,
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 54087e96d7b8..6700069949dd 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -14,6 +14,8 @@
14 * 2 of the License, or (at your option) any later version. 14 * 2 of the License, or (at your option) any later version.
15 */ 15 */
16 16
17#define pr_fmt(fmt) "IPv6-nf: " fmt
18
17#include <linux/errno.h> 19#include <linux/errno.h>
18#include <linux/types.h> 20#include <linux/types.h>
19#include <linux/string.h> 21#include <linux/string.h>
@@ -180,13 +182,11 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
180 182
181 q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); 183 q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
182 local_bh_enable(); 184 local_bh_enable();
183 if (q == NULL) 185 if (IS_ERR_OR_NULL(q)) {
184 goto oom; 186 inet_frag_maybe_warn_overflow(q, pr_fmt());
185 187 return NULL;
188 }
186 return container_of(q, struct frag_queue, q); 189 return container_of(q, struct frag_queue, q);
187
188oom:
189 return NULL;
190} 190}
191 191
192 192
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 3c6a77290c6e..196ab9347ad1 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -26,6 +26,9 @@
26 * YOSHIFUJI,H. @USAGI Always remove fragment header to 26 * YOSHIFUJI,H. @USAGI Always remove fragment header to
27 * calculate ICV correctly. 27 * calculate ICV correctly.
28 */ 28 */
29
30#define pr_fmt(fmt) "IPv6: " fmt
31
29#include <linux/errno.h> 32#include <linux/errno.h>
30#include <linux/types.h> 33#include <linux/types.h>
31#include <linux/string.h> 34#include <linux/string.h>
@@ -185,9 +188,10 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6
185 hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); 188 hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
186 189
187 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); 190 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
188 if (q == NULL) 191 if (IS_ERR_OR_NULL(q)) {
192 inet_frag_maybe_warn_overflow(q, pr_fmt());
189 return NULL; 193 return NULL;
190 194 }
191 return container_of(q, struct frag_queue, q); 195 return container_of(q, struct frag_queue, q);
192} 196}
193 197
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9b6460055df5..f6d629fd6aee 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -389,6 +389,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
389 } 389 }
390 390
391 if (type == ICMPV6_PKT_TOOBIG) { 391 if (type == ICMPV6_PKT_TOOBIG) {
392 /* We are not interested in TCP_LISTEN and open_requests
393 * (SYN-ACKs sent out by Linux are always <576 bytes so
394 * they should go through unfragmented).
395 */
396 if (sk->sk_state == TCP_LISTEN)
397 goto out;
398
392 tp->mtu_info = ntohl(info); 399 tp->mtu_info = ntohl(info);
393 if (!sock_owned_by_user(sk)) 400 if (!sock_owned_by_user(sk))
394 tcp_v6_mtu_reduced(sk); 401 tcp_v6_mtu_reduced(sk);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 599e1ba6d1ce..d8e5e852fc7a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1285,10 +1285,18 @@ do_confirm:
1285 1285
1286void udpv6_destroy_sock(struct sock *sk) 1286void udpv6_destroy_sock(struct sock *sk)
1287{ 1287{
1288 struct udp_sock *up = udp_sk(sk);
1288 lock_sock(sk); 1289 lock_sock(sk);
1289 udp_v6_flush_pending_frames(sk); 1290 udp_v6_flush_pending_frames(sk);
1290 release_sock(sk); 1291 release_sock(sk);
1291 1292
1293 if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
1294 void (*encap_destroy)(struct sock *sk);
1295 encap_destroy = ACCESS_ONCE(up->encap_destroy);
1296 if (encap_destroy)
1297 encap_destroy(sk);
1298 }
1299
1292 inet6_destroy_sock(sk); 1300 inet6_destroy_sock(sk);
1293} 1301}
1294 1302
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index d07e3a626446..d28e7f014cc6 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2583,8 +2583,10 @@ bed:
2583 NULL, NULL, NULL); 2583 NULL, NULL, NULL);
2584 2584
2585 /* Check if we got some results */ 2585 /* Check if we got some results */
2586 if (!self->cachedaddr) 2586 if (!self->cachedaddr) {
2587 return -EAGAIN; /* Didn't find any devices */ 2587 err = -EAGAIN; /* Didn't find any devices */
2588 goto out;
2589 }
2588 daddr = self->cachedaddr; 2590 daddr = self->cachedaddr;
2589 /* Cleanup */ 2591 /* Cleanup */
2590 self->cachedaddr = 0; 2592 self->cachedaddr = 0;
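
The irda hunk is a lock-leak fix: returning -EAGAIN directly skipped the function's common cleanup path, bailing out with state still locked; routing the failure through "goto out" guarantees the unlock runs. The canonical shape, sketched with pthreads:

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int discover_model(int cachedaddr)
    {
            int err = 0;

            pthread_mutex_lock(&lock);
            if (!cachedaddr) {
                    err = -EAGAIN; /* didn't find any devices */
                    goto out;      /* never return with the lock held */
            }
            /* ... consume cachedaddr ... */
    out:
            pthread_mutex_unlock(&lock);
            return err;
    }

    int main(void)
    {
            return discover_model(0) == -EAGAIN ? 0 : 1;
    }
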
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index d36875f3427e..8aecf5df6656 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -114,7 +114,6 @@ struct l2tp_net {
114 114
115static void l2tp_session_set_header_len(struct l2tp_session *session, int version); 115static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
116static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); 116static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
117static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
118 117
119static inline struct l2tp_net *l2tp_pernet(struct net *net) 118static inline struct l2tp_net *l2tp_pernet(struct net *net)
120{ 119{
@@ -192,6 +191,7 @@ struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
192 } else { 191 } else {
193 /* Socket is owned by kernelspace */ 192 /* Socket is owned by kernelspace */
194 sk = tunnel->sock; 193 sk = tunnel->sock;
194 sock_hold(sk);
195 } 195 }
196 196
197out: 197out:
@@ -210,6 +210,7 @@ void l2tp_tunnel_sock_put(struct sock *sk)
210 } 210 }
211 sock_put(sk); 211 sock_put(sk);
212 } 212 }
213 sock_put(sk);
213} 214}
214EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); 215EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
215 216
@@ -373,10 +374,8 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
373 struct sk_buff *skbp; 374 struct sk_buff *skbp;
374 struct sk_buff *tmp; 375 struct sk_buff *tmp;
375 u32 ns = L2TP_SKB_CB(skb)->ns; 376 u32 ns = L2TP_SKB_CB(skb)->ns;
376 struct l2tp_stats *sstats;
377 377
378 spin_lock_bh(&session->reorder_q.lock); 378 spin_lock_bh(&session->reorder_q.lock);
379 sstats = &session->stats;
380 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { 379 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
381 if (L2TP_SKB_CB(skbp)->ns > ns) { 380 if (L2TP_SKB_CB(skbp)->ns > ns) {
382 __skb_queue_before(&session->reorder_q, skbp, skb); 381 __skb_queue_before(&session->reorder_q, skbp, skb);
@@ -384,9 +383,7 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
384 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", 383 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
385 session->name, ns, L2TP_SKB_CB(skbp)->ns, 384 session->name, ns, L2TP_SKB_CB(skbp)->ns,
386 skb_queue_len(&session->reorder_q)); 385 skb_queue_len(&session->reorder_q));
387 u64_stats_update_begin(&sstats->syncp); 386 atomic_long_inc(&session->stats.rx_oos_packets);
388 sstats->rx_oos_packets++;
389 u64_stats_update_end(&sstats->syncp);
390 goto out; 387 goto out;
391 } 388 }
392 } 389 }
@@ -403,23 +400,16 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
403{ 400{
404 struct l2tp_tunnel *tunnel = session->tunnel; 401 struct l2tp_tunnel *tunnel = session->tunnel;
405 int length = L2TP_SKB_CB(skb)->length; 402 int length = L2TP_SKB_CB(skb)->length;
406 struct l2tp_stats *tstats, *sstats;
407 403
408 /* We're about to requeue the skb, so return resources 404 /* We're about to requeue the skb, so return resources
409 * to its current owner (a socket receive buffer). 405 * to its current owner (a socket receive buffer).
410 */ 406 */
411 skb_orphan(skb); 407 skb_orphan(skb);
412 408
413 tstats = &tunnel->stats; 409 atomic_long_inc(&tunnel->stats.rx_packets);
414 u64_stats_update_begin(&tstats->syncp); 410 atomic_long_add(length, &tunnel->stats.rx_bytes);
415 sstats = &session->stats; 411 atomic_long_inc(&session->stats.rx_packets);
416 u64_stats_update_begin(&sstats->syncp); 412 atomic_long_add(length, &session->stats.rx_bytes);
417 tstats->rx_packets++;
418 tstats->rx_bytes += length;
419 sstats->rx_packets++;
420 sstats->rx_bytes += length;
421 u64_stats_update_end(&tstats->syncp);
422 u64_stats_update_end(&sstats->syncp);
423 413
424 if (L2TP_SKB_CB(skb)->has_seq) { 414 if (L2TP_SKB_CB(skb)->has_seq) {
425 /* Bump our Nr */ 415 /* Bump our Nr */
@@ -450,7 +440,6 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
450{ 440{
451 struct sk_buff *skb; 441 struct sk_buff *skb;
452 struct sk_buff *tmp; 442 struct sk_buff *tmp;
453 struct l2tp_stats *sstats;
454 443
455 /* If the pkt at the head of the queue has the nr that we 444 /* If the pkt at the head of the queue has the nr that we
456 * expect to send up next, dequeue it and any other 445 * expect to send up next, dequeue it and any other
@@ -458,13 +447,10 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
458 */ 447 */
459start: 448start:
460 spin_lock_bh(&session->reorder_q.lock); 449 spin_lock_bh(&session->reorder_q.lock);
461 sstats = &session->stats;
462 skb_queue_walk_safe(&session->reorder_q, skb, tmp) { 450 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
463 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { 451 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
464 u64_stats_update_begin(&sstats->syncp); 452 atomic_long_inc(&session->stats.rx_seq_discards);
465 sstats->rx_seq_discards++; 453 atomic_long_inc(&session->stats.rx_errors);
466 sstats->rx_errors++;
467 u64_stats_update_end(&sstats->syncp);
468 l2tp_dbg(session, L2TP_MSG_SEQ, 454 l2tp_dbg(session, L2TP_MSG_SEQ,
469 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n", 455 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
470 session->name, L2TP_SKB_CB(skb)->ns, 456 session->name, L2TP_SKB_CB(skb)->ns,
@@ -623,7 +609,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
623 struct l2tp_tunnel *tunnel = session->tunnel; 609 struct l2tp_tunnel *tunnel = session->tunnel;
624 int offset; 610 int offset;
625 u32 ns, nr; 611 u32 ns, nr;
626 struct l2tp_stats *sstats = &session->stats;
627 612
628 /* The ref count is increased since we now hold a pointer to 613 /* The ref count is increased since we now hold a pointer to
629 * the session. Take care to decrement the refcnt when exiting 614 * the session. Take care to decrement the refcnt when exiting
@@ -640,9 +625,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
640 "%s: cookie mismatch (%u/%u). Discarding.\n", 625 "%s: cookie mismatch (%u/%u). Discarding.\n",
641 tunnel->name, tunnel->tunnel_id, 626 tunnel->name, tunnel->tunnel_id,
642 session->session_id); 627 session->session_id);
643 u64_stats_update_begin(&sstats->syncp); 628 atomic_long_inc(&session->stats.rx_cookie_discards);
644 sstats->rx_cookie_discards++;
645 u64_stats_update_end(&sstats->syncp);
646 goto discard; 629 goto discard;
647 } 630 }
648 ptr += session->peer_cookie_len; 631 ptr += session->peer_cookie_len;
@@ -711,9 +694,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
711 l2tp_warn(session, L2TP_MSG_SEQ, 694 l2tp_warn(session, L2TP_MSG_SEQ,
712 "%s: recv data has no seq numbers when required. Discarding.\n", 695 "%s: recv data has no seq numbers when required. Discarding.\n",
713 session->name); 696 session->name);
714 u64_stats_update_begin(&sstats->syncp); 697 atomic_long_inc(&session->stats.rx_seq_discards);
715 sstats->rx_seq_discards++;
716 u64_stats_update_end(&sstats->syncp);
717 goto discard; 698 goto discard;
718 } 699 }
719 700
@@ -732,9 +713,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
732 l2tp_warn(session, L2TP_MSG_SEQ, 713 l2tp_warn(session, L2TP_MSG_SEQ,
733 "%s: recv data has no seq numbers when required. Discarding.\n", 714 "%s: recv data has no seq numbers when required. Discarding.\n",
734 session->name); 715 session->name);
735 u64_stats_update_begin(&sstats->syncp); 716 atomic_long_inc(&session->stats.rx_seq_discards);
736 sstats->rx_seq_discards++;
737 u64_stats_update_end(&sstats->syncp);
738 goto discard; 717 goto discard;
739 } 718 }
740 } 719 }
@@ -788,9 +767,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
788 * packets 767 * packets
789 */ 768 */
790 if (L2TP_SKB_CB(skb)->ns != session->nr) { 769 if (L2TP_SKB_CB(skb)->ns != session->nr) {
791 u64_stats_update_begin(&sstats->syncp); 770 atomic_long_inc(&session->stats.rx_seq_discards);
792 sstats->rx_seq_discards++;
793 u64_stats_update_end(&sstats->syncp);
794 l2tp_dbg(session, L2TP_MSG_SEQ, 771 l2tp_dbg(session, L2TP_MSG_SEQ,
795 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n", 772 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
796 session->name, L2TP_SKB_CB(skb)->ns, 773 session->name, L2TP_SKB_CB(skb)->ns,
@@ -816,9 +793,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
816 return; 793 return;
817 794
818discard: 795discard:
819 u64_stats_update_begin(&sstats->syncp); 796 atomic_long_inc(&session->stats.rx_errors);
820 sstats->rx_errors++;
821 u64_stats_update_end(&sstats->syncp);
822 kfree_skb(skb); 797 kfree_skb(skb);
823 798
824 if (session->deref) 799 if (session->deref)
@@ -828,6 +803,23 @@ discard:
828} 803}
829EXPORT_SYMBOL(l2tp_recv_common); 804EXPORT_SYMBOL(l2tp_recv_common);
830 805
806/* Drop skbs from the session's reorder_q
807 */
808int l2tp_session_queue_purge(struct l2tp_session *session)
809{
810 struct sk_buff *skb = NULL;
811 BUG_ON(!session);
812 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
813 while ((skb = skb_dequeue(&session->reorder_q))) {
814 atomic_long_inc(&session->stats.rx_errors);
815 kfree_skb(skb);
816 if (session->deref)
817 (*session->deref)(session);
818 }
819 return 0;
820}
821EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
822
831/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame 823/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
832 * here. The skb is not on a list when we get here. 824 * here. The skb is not on a list when we get here.
833 * Returns 0 if the packet was a data packet and was successfully passed on. 825 * Returns 0 if the packet was a data packet and was successfully passed on.
@@ -843,7 +835,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
843 u32 tunnel_id, session_id; 835 u32 tunnel_id, session_id;
844 u16 version; 836 u16 version;
845 int length; 837 int length;
846 struct l2tp_stats *tstats;
847 838
848 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) 839 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
849 goto discard_bad_csum; 840 goto discard_bad_csum;
@@ -932,10 +923,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
932discard_bad_csum: 923discard_bad_csum:
933 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); 924 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
934 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); 925 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
935 tstats = &tunnel->stats; 926 atomic_long_inc(&tunnel->stats.rx_errors);
936 u64_stats_update_begin(&tstats->syncp);
937 tstats->rx_errors++;
938 u64_stats_update_end(&tstats->syncp);
939 kfree_skb(skb); 927 kfree_skb(skb);
940 928
941 return 0; 929 return 0;
@@ -1062,7 +1050,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1062 struct l2tp_tunnel *tunnel = session->tunnel; 1050 struct l2tp_tunnel *tunnel = session->tunnel;
1063 unsigned int len = skb->len; 1051 unsigned int len = skb->len;
1064 int error; 1052 int error;
1065 struct l2tp_stats *tstats, *sstats;
1066 1053
1067 /* Debug */ 1054 /* Debug */
1068 if (session->send_seq) 1055 if (session->send_seq)
@@ -1091,21 +1078,15 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1091 error = ip_queue_xmit(skb, fl); 1078 error = ip_queue_xmit(skb, fl);
1092 1079
1093 /* Update stats */ 1080 /* Update stats */
1094 tstats = &tunnel->stats;
1095 u64_stats_update_begin(&tstats->syncp);
1096 sstats = &session->stats;
1097 u64_stats_update_begin(&sstats->syncp);
1098 if (error >= 0) { 1081 if (error >= 0) {
1099 tstats->tx_packets++; 1082 atomic_long_inc(&tunnel->stats.tx_packets);
1100 tstats->tx_bytes += len; 1083 atomic_long_add(len, &tunnel->stats.tx_bytes);
1101 sstats->tx_packets++; 1084 atomic_long_inc(&session->stats.tx_packets);
1102 sstats->tx_bytes += len; 1085 atomic_long_add(len, &session->stats.tx_bytes);
1103 } else { 1086 } else {
1104 tstats->tx_errors++; 1087 atomic_long_inc(&tunnel->stats.tx_errors);
1105 sstats->tx_errors++; 1088 atomic_long_inc(&session->stats.tx_errors);
1106 } 1089 }
1107 u64_stats_update_end(&tstats->syncp);
1108 u64_stats_update_end(&sstats->syncp);
1109 1090
1110 return 0; 1091 return 0;
1111} 1092}
@@ -1282,6 +1263,7 @@ static void l2tp_tunnel_destruct(struct sock *sk)
1282 /* No longer an encapsulation socket. See net/ipv4/udp.c */ 1263 /* No longer an encapsulation socket. See net/ipv4/udp.c */
1283 (udp_sk(sk))->encap_type = 0; 1264 (udp_sk(sk))->encap_type = 0;
1284 (udp_sk(sk))->encap_rcv = NULL; 1265 (udp_sk(sk))->encap_rcv = NULL;
1266 (udp_sk(sk))->encap_destroy = NULL;
1285 break; 1267 break;
1286 case L2TP_ENCAPTYPE_IP: 1268 case L2TP_ENCAPTYPE_IP:
1287 break; 1269 break;
@@ -1311,7 +1293,7 @@ end:
1311 1293
1312/* When the tunnel is closed, all the attached sessions need to go too. 1294/* When the tunnel is closed, all the attached sessions need to go too.
1313 */ 1295 */
1314static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) 1296void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1315{ 1297{
1316 int hash; 1298 int hash;
1317 struct hlist_node *walk; 1299 struct hlist_node *walk;
@@ -1334,25 +1316,13 @@ again:
1334 1316
1335 hlist_del_init(&session->hlist); 1317 hlist_del_init(&session->hlist);
1336 1318
1337 /* Since we should hold the sock lock while
1338 * doing any unbinding, we need to release the
1339 * lock we're holding before taking that lock.
1340 * Hold a reference to the sock so it doesn't
1341 * disappear as we're jumping between locks.
1342 */
1343 if (session->ref != NULL) 1319 if (session->ref != NULL)
1344 (*session->ref)(session); 1320 (*session->ref)(session);
1345 1321
1346 write_unlock_bh(&tunnel->hlist_lock); 1322 write_unlock_bh(&tunnel->hlist_lock);
1347 1323
1348 if (tunnel->version != L2TP_HDR_VER_2) { 1324 __l2tp_session_unhash(session);
1349 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); 1325 l2tp_session_queue_purge(session);
1350
1351 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1352 hlist_del_init_rcu(&session->global_hlist);
1353 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1354 synchronize_rcu();
1355 }
1356 1326
1357 if (session->session_close != NULL) 1327 if (session->session_close != NULL)
1358 (*session->session_close)(session); 1328 (*session->session_close)(session);
@@ -1360,6 +1330,8 @@ again:
1360 if (session->deref != NULL) 1330 if (session->deref != NULL)
1361 (*session->deref)(session); 1331 (*session->deref)(session);
1362 1332
1333 l2tp_session_dec_refcount(session);
1334
1363 write_lock_bh(&tunnel->hlist_lock); 1335 write_lock_bh(&tunnel->hlist_lock);
1364 1336
1365 /* Now restart from the beginning of this hash 1337 /* Now restart from the beginning of this hash
@@ -1372,6 +1344,17 @@ again:
1372 } 1344 }
1373 write_unlock_bh(&tunnel->hlist_lock); 1345 write_unlock_bh(&tunnel->hlist_lock);
1374} 1346}
1347EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
1348
1349/* Tunnel socket destroy hook for UDP encapsulation */
1350static void l2tp_udp_encap_destroy(struct sock *sk)
1351{
1352 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
1353 if (tunnel) {
1354 l2tp_tunnel_closeall(tunnel);
1355 sock_put(sk);
1356 }
1357}
1375 1358
1376/* Really kill the tunnel. 1359/* Really kill the tunnel.
1377 * Come here only when all sessions have been cleared from the tunnel. 1360 * Come here only when all sessions have been cleared from the tunnel.
@@ -1397,19 +1380,21 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
1397 return; 1380 return;
1398 1381
1399 sock = sk->sk_socket; 1382 sock = sk->sk_socket;
1400 BUG_ON(!sock);
1401 1383
1402 /* If the tunnel socket was created directly by the kernel, use the 1384 /* If the tunnel socket was created by userspace, then go through the
1403 * sk_* API to release the socket now. Otherwise go through the 1385 * inet layer to shut the socket down, and let userspace close it.
1404 * inet_* layer to shut the socket down, and let userspace close it. 1386 * Otherwise, if we created the socket directly within the kernel, use
1387 * the sk API to release it here.
1405 * In either case the tunnel resources are freed in the socket 1388 * In either case the tunnel resources are freed in the socket
1406 * destructor when the tunnel socket goes away. 1389 * destructor when the tunnel socket goes away.
1407 */ 1390 */
1408 if (sock->file == NULL) { 1391 if (tunnel->fd >= 0) {
1409 kernel_sock_shutdown(sock, SHUT_RDWR); 1392 if (sock)
1410 sk_release_kernel(sk); 1393 inet_shutdown(sock, 2);
1411 } else { 1394 } else {
1412 inet_shutdown(sock, 2); 1395 if (sock)
1396 kernel_sock_shutdown(sock, SHUT_RDWR);
1397 sk_release_kernel(sk);
1413 } 1398 }
1414 1399
1415 l2tp_tunnel_sock_put(sk); 1400 l2tp_tunnel_sock_put(sk);
@@ -1668,6 +1653,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1668 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ 1653 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1669 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; 1654 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
1670 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; 1655 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
1656 udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
1671#if IS_ENABLED(CONFIG_IPV6) 1657#if IS_ENABLED(CONFIG_IPV6)
1672 if (sk->sk_family == PF_INET6) 1658 if (sk->sk_family == PF_INET6)
1673 udpv6_encap_enable(); 1659 udpv6_encap_enable();
@@ -1723,6 +1709,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1723 */ 1709 */
1724int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) 1710int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1725{ 1711{
1712 l2tp_tunnel_closeall(tunnel);
1726 return (false == queue_work(l2tp_wq, &tunnel->del_work)); 1713 return (false == queue_work(l2tp_wq, &tunnel->del_work));
1727} 1714}
1728EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); 1715EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
@@ -1731,62 +1718,71 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1731 */ 1718 */
1732void l2tp_session_free(struct l2tp_session *session) 1719void l2tp_session_free(struct l2tp_session *session)
1733{ 1720{
1734 struct l2tp_tunnel *tunnel; 1721 struct l2tp_tunnel *tunnel = session->tunnel;
1735 1722
1736 BUG_ON(atomic_read(&session->ref_count) != 0); 1723 BUG_ON(atomic_read(&session->ref_count) != 0);
1737 1724
1738 tunnel = session->tunnel; 1725 if (tunnel) {
1739 if (tunnel != NULL) {
1740 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); 1726 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1727 if (session->session_id != 0)
1728 atomic_dec(&l2tp_session_count);
1729 sock_put(tunnel->sock);
1730 session->tunnel = NULL;
1731 l2tp_tunnel_dec_refcount(tunnel);
1732 }
1733
1734 kfree(session);
1741 1735
1742 /* Delete the session from the hash */ 1736 return;
1737}
1738EXPORT_SYMBOL_GPL(l2tp_session_free);
1739
1740/* Remove an l2tp session from l2tp_core's hash lists.
1741 * Provides a tidyup interface for pseudowire code which can't just route all
1742 * shutdown via l2tp_session_delete and a pseudowire-specific session_close
1743 * callback.
1744 */
1745void __l2tp_session_unhash(struct l2tp_session *session)
1746{
1747 struct l2tp_tunnel *tunnel = session->tunnel;
1748
1749 /* Remove the session from core hashes */
1750 if (tunnel) {
1751 /* Remove from the per-tunnel hash */
1743 write_lock_bh(&tunnel->hlist_lock); 1752 write_lock_bh(&tunnel->hlist_lock);
1744 hlist_del_init(&session->hlist); 1753 hlist_del_init(&session->hlist);
1745 write_unlock_bh(&tunnel->hlist_lock); 1754 write_unlock_bh(&tunnel->hlist_lock);
1746 1755
1747 /* Unlink from the global hash if not L2TPv2 */ 1756 /* For L2TPv3 we have a per-net hash: remove from there, too */
1748 if (tunnel->version != L2TP_HDR_VER_2) { 1757 if (tunnel->version != L2TP_HDR_VER_2) {
1749 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); 1758 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1750
1751 spin_lock_bh(&pn->l2tp_session_hlist_lock); 1759 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1752 hlist_del_init_rcu(&session->global_hlist); 1760 hlist_del_init_rcu(&session->global_hlist);
1753 spin_unlock_bh(&pn->l2tp_session_hlist_lock); 1761 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1754 synchronize_rcu(); 1762 synchronize_rcu();
1755 } 1763 }
1756
1757 if (session->session_id != 0)
1758 atomic_dec(&l2tp_session_count);
1759
1760 sock_put(tunnel->sock);
1761
1762 /* This will delete the tunnel context if this
1763 * is the last session on the tunnel.
1764 */
1765 session->tunnel = NULL;
1766 l2tp_tunnel_dec_refcount(tunnel);
1767 } 1764 }
1768
1769 kfree(session);
1770
1771 return;
1772} 1765}
1773EXPORT_SYMBOL_GPL(l2tp_session_free); 1766EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
1774 1767
1775/* This function is used by the netlink SESSION_DELETE command and by 1768/* This function is used by the netlink SESSION_DELETE command and by
1776 pseudowire modules. 1769 pseudowire modules.
1777 */ 1770 */
1778int l2tp_session_delete(struct l2tp_session *session) 1771int l2tp_session_delete(struct l2tp_session *session)
1779{ 1772{
1773 if (session->ref)
1774 (*session->ref)(session);
1775 __l2tp_session_unhash(session);
1776 l2tp_session_queue_purge(session);
1780 if (session->session_close != NULL) 1777 if (session->session_close != NULL)
1781 (*session->session_close)(session); 1778 (*session->session_close)(session);
1782 1779 if (session->deref)
1780 (*session->deref)(session);
1783 l2tp_session_dec_refcount(session); 1781 l2tp_session_dec_refcount(session);
1784
1785 return 0; 1782 return 0;
1786} 1783}
1787EXPORT_SYMBOL_GPL(l2tp_session_delete); 1784EXPORT_SYMBOL_GPL(l2tp_session_delete);
1788 1785
1789
1790/* We come here whenever a session's send_seq, cookie_len or 1786/* We come here whenever a session's send_seq, cookie_len or
1791 * l2specific_len parameters are set. 1787 * l2specific_len parameters are set.
1792 */ 1788 */
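
The recurring pattern across the l2tp_core hunks is the stats rework: u64 counters bracketed by u64_stats_sync seqcount sections become plain atomic_long_t updated with atomic_long_inc()/atomic_long_add(), which is safe from any context at the cost of 32-bit-wide counters on 32-bit hosts. A C11 model of the new update/read discipline:

    #include <stdatomic.h>
    #include <stdio.h>

    struct l2tp_stats_model {
            atomic_long tx_packets;
            atomic_long tx_bytes;
    };

    int main(void)
    {
            struct l2tp_stats_model s;

            atomic_init(&s.tx_packets, 0);
            atomic_init(&s.tx_bytes, 0);

            /* writers: one lock-free RMW per counter, no seqcount bracketing */
            atomic_fetch_add(&s.tx_packets, 1);
            atomic_fetch_add(&s.tx_bytes, 1500);

            /* readers: each counter is individually consistent, not a snapshot */
            printf("%ld pkts / %ld bytes\n",
                   atomic_load(&s.tx_packets), atomic_load(&s.tx_bytes));
            return 0;
    }

The trade-off the series accepts: a reader can see tx_packets and tx_bytes from different instants, which the old syncp bracketing prevented.
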
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 8eb8f1d47f3a..485a490fd990 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -36,16 +36,15 @@ enum {
36struct sk_buff; 36struct sk_buff;
37 37
38struct l2tp_stats { 38struct l2tp_stats {
39 u64 tx_packets; 39 atomic_long_t tx_packets;
40 u64 tx_bytes; 40 atomic_long_t tx_bytes;
41 u64 tx_errors; 41 atomic_long_t tx_errors;
42 u64 rx_packets; 42 atomic_long_t rx_packets;
43 u64 rx_bytes; 43 atomic_long_t rx_bytes;
44 u64 rx_seq_discards; 44 atomic_long_t rx_seq_discards;
45 u64 rx_oos_packets; 45 atomic_long_t rx_oos_packets;
46 u64 rx_errors; 46 atomic_long_t rx_errors;
47 u64 rx_cookie_discards; 47 atomic_long_t rx_cookie_discards;
48 struct u64_stats_sync syncp;
49}; 48};
50 49
51struct l2tp_tunnel; 50struct l2tp_tunnel;
@@ -240,11 +239,14 @@ extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
240extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); 239extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
241 240
242extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp); 241extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
242extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
243extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); 243extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
244extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); 244extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
245extern void __l2tp_session_unhash(struct l2tp_session *session);
245extern int l2tp_session_delete(struct l2tp_session *session); 246extern int l2tp_session_delete(struct l2tp_session *session);
246extern void l2tp_session_free(struct l2tp_session *session); 247extern void l2tp_session_free(struct l2tp_session *session);
247extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); 248extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
249extern int l2tp_session_queue_purge(struct l2tp_session *session);
248extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); 250extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
249 251
250extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); 252extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index c3813bc84552..072d7202e182 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -146,14 +146,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
146 tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0, 146 tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0,
147 atomic_read(&tunnel->ref_count)); 147 atomic_read(&tunnel->ref_count));
148 148
149 seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n", 149 seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n",
150 tunnel->debug, 150 tunnel->debug,
151 (unsigned long long)tunnel->stats.tx_packets, 151 atomic_long_read(&tunnel->stats.tx_packets),
152 (unsigned long long)tunnel->stats.tx_bytes, 152 atomic_long_read(&tunnel->stats.tx_bytes),
153 (unsigned long long)tunnel->stats.tx_errors, 153 atomic_long_read(&tunnel->stats.tx_errors),
154 (unsigned long long)tunnel->stats.rx_packets, 154 atomic_long_read(&tunnel->stats.rx_packets),
155 (unsigned long long)tunnel->stats.rx_bytes, 155 atomic_long_read(&tunnel->stats.rx_bytes),
156 (unsigned long long)tunnel->stats.rx_errors); 156 atomic_long_read(&tunnel->stats.rx_errors));
157 157
158 if (tunnel->show != NULL) 158 if (tunnel->show != NULL)
159 tunnel->show(m, tunnel); 159 tunnel->show(m, tunnel);
@@ -203,14 +203,14 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
203 seq_printf(m, "\n"); 203 seq_printf(m, "\n");
204 } 204 }
205 205
206 seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n", 206 seq_printf(m, " %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
207 session->nr, session->ns, 207 session->nr, session->ns,
208 (unsigned long long)session->stats.tx_packets, 208 atomic_long_read(&session->stats.tx_packets),
209 (unsigned long long)session->stats.tx_bytes, 209 atomic_long_read(&session->stats.tx_bytes),
210 (unsigned long long)session->stats.tx_errors, 210 atomic_long_read(&session->stats.tx_errors),
211 (unsigned long long)session->stats.rx_packets, 211 atomic_long_read(&session->stats.rx_packets),
212 (unsigned long long)session->stats.rx_bytes, 212 atomic_long_read(&session->stats.rx_bytes),
213 (unsigned long long)session->stats.rx_errors); 213 atomic_long_read(&session->stats.rx_errors));
214 214
215 if (session->show != NULL) 215 if (session->show != NULL)
216 session->show(m, session); 216 session->show(m, session);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 7f41b7051269..571db8dd2292 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -228,10 +228,16 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
228static void l2tp_ip_destroy_sock(struct sock *sk) 228static void l2tp_ip_destroy_sock(struct sock *sk)
229{ 229{
230 struct sk_buff *skb; 230 struct sk_buff *skb;
231 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
231 232
232 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) 233 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
233 kfree_skb(skb); 234 kfree_skb(skb);
234 235
236 if (tunnel) {
237 l2tp_tunnel_closeall(tunnel);
238 sock_put(sk);
239 }
240
235 sk_refcnt_debug_dec(sk); 241 sk_refcnt_debug_dec(sk);
236} 242}
237 243
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 41f2f8126ebc..c74f5a91ff6a 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -241,10 +241,17 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)
 
 static void l2tp_ip6_destroy_sock(struct sock *sk)
 {
+	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+
 	lock_sock(sk);
 	ip6_flush_pending_frames(sk);
 	release_sock(sk);
 
+	if (tunnel) {
+		l2tp_tunnel_closeall(tunnel);
+		sock_put(sk);
+	}
+
 	inet6_destroy_sock(sk);
 }
 
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index c1bab22db85e..0825ff26e113 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -246,8 +246,6 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
 #if IS_ENABLED(CONFIG_IPV6)
 	struct ipv6_pinfo *np = NULL;
 #endif
-	struct l2tp_stats stats;
-	unsigned int start;
 
 	hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags,
 			  L2TP_CMD_TUNNEL_GET);
@@ -265,28 +263,22 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	do {
-		start = u64_stats_fetch_begin(&tunnel->stats.syncp);
-		stats.tx_packets = tunnel->stats.tx_packets;
-		stats.tx_bytes = tunnel->stats.tx_bytes;
-		stats.tx_errors = tunnel->stats.tx_errors;
-		stats.rx_packets = tunnel->stats.rx_packets;
-		stats.rx_bytes = tunnel->stats.rx_bytes;
-		stats.rx_errors = tunnel->stats.rx_errors;
-		stats.rx_seq_discards = tunnel->stats.rx_seq_discards;
-		stats.rx_oos_packets = tunnel->stats.rx_oos_packets;
-	} while (u64_stats_fetch_retry(&tunnel->stats.syncp, start));
-
-	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
-	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
-	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
+	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
+			atomic_long_read(&tunnel->stats.tx_packets)) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
+			atomic_long_read(&tunnel->stats.tx_bytes)) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
+			atomic_long_read(&tunnel->stats.tx_errors)) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
+			atomic_long_read(&tunnel->stats.rx_packets)) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
+			atomic_long_read(&tunnel->stats.rx_bytes)) ||
 	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
-			stats.rx_seq_discards) ||
+			atomic_long_read(&tunnel->stats.rx_seq_discards)) ||
 	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
-			stats.rx_oos_packets) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
+			atomic_long_read(&tunnel->stats.rx_oos_packets)) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
+			atomic_long_read(&tunnel->stats.rx_errors)))
 		goto nla_put_failure;
 	nla_nest_end(skb, nest);
 
@@ -612,8 +604,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
 	struct nlattr *nest;
 	struct l2tp_tunnel *tunnel = session->tunnel;
 	struct sock *sk = NULL;
-	struct l2tp_stats stats;
-	unsigned int start;
 
 	sk = tunnel->sock;
 
@@ -656,28 +646,22 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	do {
-		start = u64_stats_fetch_begin(&session->stats.syncp);
-		stats.tx_packets = session->stats.tx_packets;
-		stats.tx_bytes = session->stats.tx_bytes;
-		stats.tx_errors = session->stats.tx_errors;
-		stats.rx_packets = session->stats.rx_packets;
-		stats.rx_bytes = session->stats.rx_bytes;
-		stats.rx_errors = session->stats.rx_errors;
-		stats.rx_seq_discards = session->stats.rx_seq_discards;
-		stats.rx_oos_packets = session->stats.rx_oos_packets;
-	} while (u64_stats_fetch_retry(&session->stats.syncp, start));
-
-	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
-	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
-	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
+	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
+			atomic_long_read(&session->stats.tx_packets)) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
+			atomic_long_read(&session->stats.tx_bytes)) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
+			atomic_long_read(&session->stats.tx_errors)) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
+			atomic_long_read(&session->stats.rx_packets)) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
+			atomic_long_read(&session->stats.rx_bytes)) ||
 	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
-			stats.rx_seq_discards) ||
+			atomic_long_read(&session->stats.rx_seq_discards)) ||
 	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
-			stats.rx_oos_packets) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
+			atomic_long_read(&session->stats.rx_oos_packets)) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
+			atomic_long_read(&session->stats.rx_errors)))
 		goto nla_put_failure;
 	nla_nest_end(skb, nest);
 
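
[Editor's note] What the two netlink hunks delete is the seqcount snapshot loop (u64_stats_fetch_begin/retry) that plain u64 counters needed on 32-bit machines: the reader retries until it observes a stable sequence around the copy. Once each counter is an atomic_long_t it can be read directly and the loop becomes dead weight. A rough userspace sketch of the retry idiom being removed (illustrative only; a production seqlock also needs per-field atomics or fences to be race-free under the C11 memory model):

#include <stdatomic.h>

struct stats_sync {
	atomic_uint seq;                  /* odd while a write is in flight */
	unsigned long long tx_packets;    /* plain fields, guarded by seq */
	unsigned long long tx_bytes;
};

static void writer_update(struct stats_sync *s, unsigned long long bytes)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* -> odd */
	s->tx_packets++;
	s->tx_bytes += bytes;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* -> even */
}

static void reader_snapshot(struct stats_sync *s,
			    unsigned long long *pkts, unsigned long long *bytes)
{
	unsigned start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*pkts = s->tx_packets;
		*bytes = s->tx_bytes;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}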
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 6a53371dba1f..637a341c1e2d 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -97,6 +97,7 @@
 #include <net/ip.h>
 #include <net/udp.h>
 #include <net/xfrm.h>
+#include <net/inet_common.h>
 
 #include <asm/byteorder.h>
 #include <linux/atomic.h>
@@ -259,7 +260,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
 			  session->name);
 
 		/* Not bound. Nothing we can do, so discard. */
-		session->stats.rx_errors++;
+		atomic_long_inc(&session->stats.rx_errors);
 		kfree_skb(skb);
 	}
 
@@ -447,34 +448,16 @@ static void pppol2tp_session_close(struct l2tp_session *session)
 {
 	struct pppol2tp_session *ps = l2tp_session_priv(session);
 	struct sock *sk = ps->sock;
-	struct sk_buff *skb;
+	struct socket *sock = sk->sk_socket;
 
 	BUG_ON(session->magic != L2TP_SESSION_MAGIC);
 
-	if (session->session_id == 0)
-		goto out;
-
-	if (sk != NULL) {
-		lock_sock(sk);
-
-		if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
-			pppox_unbind_sock(sk);
-			sk->sk_state = PPPOX_DEAD;
-			sk->sk_state_change(sk);
-		}
-
-		/* Purge any queued data */
-		skb_queue_purge(&sk->sk_receive_queue);
-		skb_queue_purge(&sk->sk_write_queue);
-		while ((skb = skb_dequeue(&session->reorder_q))) {
-			kfree_skb(skb);
-			sock_put(sk);
-		}
 
-		release_sock(sk);
+	if (sock) {
+		inet_shutdown(sock, 2);
+		/* Don't let the session go away before our socket does */
+		l2tp_session_inc_refcount(session);
 	}
-
-out:
 	return;
 }
 
@@ -483,19 +466,12 @@ out:
  */
 static void pppol2tp_session_destruct(struct sock *sk)
 {
-	struct l2tp_session *session;
-
-	if (sk->sk_user_data != NULL) {
-		session = sk->sk_user_data;
-		if (session == NULL)
-			goto out;
-
+	struct l2tp_session *session = sk->sk_user_data;
+	if (session) {
 		sk->sk_user_data = NULL;
 		BUG_ON(session->magic != L2TP_SESSION_MAGIC);
 		l2tp_session_dec_refcount(session);
 	}
-
-out:
 	return;
 }
 
@@ -525,16 +501,13 @@ static int pppol2tp_release(struct socket *sock)
 	session = pppol2tp_sock_to_session(sk);
 
 	/* Purge any queued data */
-	skb_queue_purge(&sk->sk_receive_queue);
-	skb_queue_purge(&sk->sk_write_queue);
 	if (session != NULL) {
-		struct sk_buff *skb;
-		while ((skb = skb_dequeue(&session->reorder_q))) {
-			kfree_skb(skb);
-			sock_put(sk);
-		}
+		__l2tp_session_unhash(session);
+		l2tp_session_queue_purge(session);
 		sock_put(sk);
 	}
+	skb_queue_purge(&sk->sk_receive_queue);
+	skb_queue_purge(&sk->sk_write_queue);
 
 	release_sock(sk);
 
@@ -880,18 +853,6 @@ out:
 	return error;
 }
 
-/* Called when deleting sessions via the netlink interface.
- */
-static int pppol2tp_session_delete(struct l2tp_session *session)
-{
-	struct pppol2tp_session *ps = l2tp_session_priv(session);
-
-	if (ps->sock == NULL)
-		l2tp_session_dec_refcount(session);
-
-	return 0;
-}
-
 #endif /* CONFIG_L2TP_V3 */
 
 /* getname() support.
@@ -1025,14 +986,14 @@ end:
 static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,
 				struct l2tp_stats *stats)
 {
-	dest->tx_packets = stats->tx_packets;
-	dest->tx_bytes = stats->tx_bytes;
-	dest->tx_errors = stats->tx_errors;
-	dest->rx_packets = stats->rx_packets;
-	dest->rx_bytes = stats->rx_bytes;
-	dest->rx_seq_discards = stats->rx_seq_discards;
-	dest->rx_oos_packets = stats->rx_oos_packets;
-	dest->rx_errors = stats->rx_errors;
+	dest->tx_packets = atomic_long_read(&stats->tx_packets);
+	dest->tx_bytes = atomic_long_read(&stats->tx_bytes);
+	dest->tx_errors = atomic_long_read(&stats->tx_errors);
+	dest->rx_packets = atomic_long_read(&stats->rx_packets);
+	dest->rx_bytes = atomic_long_read(&stats->rx_bytes);
+	dest->rx_seq_discards = atomic_long_read(&stats->rx_seq_discards);
+	dest->rx_oos_packets = atomic_long_read(&stats->rx_oos_packets);
+	dest->rx_errors = atomic_long_read(&stats->rx_errors);
 }
 
 /* Session ioctl helper.
@@ -1666,14 +1627,14 @@ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
 		   tunnel->name,
 		   (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
 		   atomic_read(&tunnel->ref_count) - 1);
-	seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
+	seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n",
 		   tunnel->debug,
-		   (unsigned long long)tunnel->stats.tx_packets,
-		   (unsigned long long)tunnel->stats.tx_bytes,
-		   (unsigned long long)tunnel->stats.tx_errors,
-		   (unsigned long long)tunnel->stats.rx_packets,
-		   (unsigned long long)tunnel->stats.rx_bytes,
-		   (unsigned long long)tunnel->stats.rx_errors);
+		   atomic_long_read(&tunnel->stats.tx_packets),
+		   atomic_long_read(&tunnel->stats.tx_bytes),
+		   atomic_long_read(&tunnel->stats.tx_errors),
+		   atomic_long_read(&tunnel->stats.rx_packets),
+		   atomic_long_read(&tunnel->stats.rx_bytes),
+		   atomic_long_read(&tunnel->stats.rx_errors));
 }
 
 static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
@@ -1708,14 +1669,14 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
 		   session->lns_mode ? "LNS" : "LAC",
 		   session->debug,
 		   jiffies_to_msecs(session->reorder_timeout));
-	seq_printf(m, "   %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
+	seq_printf(m, "   %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n",
 		   session->nr, session->ns,
-		   (unsigned long long)session->stats.tx_packets,
-		   (unsigned long long)session->stats.tx_bytes,
-		   (unsigned long long)session->stats.tx_errors,
-		   (unsigned long long)session->stats.rx_packets,
-		   (unsigned long long)session->stats.rx_bytes,
-		   (unsigned long long)session->stats.rx_errors);
+		   atomic_long_read(&session->stats.tx_packets),
+		   atomic_long_read(&session->stats.tx_bytes),
+		   atomic_long_read(&session->stats.tx_errors),
+		   atomic_long_read(&session->stats.rx_packets),
+		   atomic_long_read(&session->stats.rx_bytes),
+		   atomic_long_read(&session->stats.rx_errors));
 
 	if (po)
 		seq_printf(m, "   interface %s\n", ppp_dev_name(&po->chan));
@@ -1839,7 +1800,7 @@ static const struct pppox_proto pppol2tp_proto = {
 
 static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
 	.session_create	= pppol2tp_session_create,
-	.session_delete	= pppol2tp_session_delete,
+	.session_delete	= l2tp_session_delete,
 };
 
 #endif /* CONFIG_L2TP_V3 */
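
[Editor's note] The pppol2tp_session_destruct() rewrite above collapses a double NULL test and a goto into one branch: read sk_user_data once, detach it, then drop the reference it held, so the session cannot be looked up again while it is being torn down. A minimal sketch of that shape (plain stand-in types, not the kernel structures):

struct session { int refcount; };

static void session_put(struct session *s) { s->refcount--; }

static void sock_destruct(void **user_data)
{
	struct session *session = *user_data;

	if (session) {
		*user_data = NULL;     /* detach before dropping the ref */
		session_put(session);
	}
}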
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 47edf5a40a59..61f49d241712 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1394,10 +1394,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 		skb_reset_network_header(skb);
 		IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
 			  &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
-		rcu_read_lock();
 		ipv4_update_pmtu(skb, dev_net(skb->dev),
 				 mtu, 0, 0, 0, 0);
-		rcu_read_unlock();
 		/* Client uses PMTUD? */
 		if (!(cih->frag_off & htons(IP_DF)))
 			goto ignore_ipip;
@@ -1577,7 +1575,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
 	}
 	/* ipvs enabled in this netns ? */
 	net = skb_net(skb);
-	if (!net_ipvs(net)->enable)
+	ipvs = net_ipvs(net);
+	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
 		return NF_ACCEPT;
 
 	ip_vs_fill_iph_skb(af, skb, &iph);
@@ -1654,7 +1653,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
 	}
 
 	IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
-	ipvs = net_ipvs(net);
 	/* Check the server status */
 	if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
 		/* the destination server is not available */
@@ -1815,13 +1813,15 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
 {
 	int r;
 	struct net *net;
+	struct netns_ipvs *ipvs;
 
 	if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
 		return NF_ACCEPT;
 
 	/* ipvs enabled in this netns ? */
 	net = skb_net(skb);
-	if (!net_ipvs(net)->enable)
+	ipvs = net_ipvs(net);
+	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
 		return NF_ACCEPT;
 
 	return ip_vs_in_icmp(skb, &r, hooknum);
@@ -1835,6 +1835,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
 {
 	int r;
 	struct net *net;
+	struct netns_ipvs *ipvs;
 	struct ip_vs_iphdr iphdr;
 
 	ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr);
@@ -1843,7 +1844,8 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
 
 	/* ipvs enabled in this netns ? */
 	net = skb_net(skb);
-	if (!net_ipvs(net)->enable)
+	ipvs = net_ipvs(net);
+	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
 		return NF_ACCEPT;
 
 	return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr);
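
[Editor's note] The three ip_vs_core hunks apply the same gate at every entry hook: resolve the per-netns IPVS state once, then accept the packet unprocessed when IPVS is disabled or is deliberately running as a backup that must not handle traffic (the new backup_only sysctl registered in the next file). A stand-in sketch of the condition (the real sysctl_backup_only() helper, to my understanding, also requires the node to actually be in backup sync state):

#include <stdbool.h>

struct netns_ipvs_stub {
	int enable;
	int backup_state;        /* nonzero when running as sync backup */
	int sysctl_backup_only;
};

static bool ipvs_accept_unprocessed(const struct netns_ipvs_stub *ipvs)
{
	/* mirrors: unlikely(sysctl_backup_only(ipvs) || !ipvs->enable) */
	return (ipvs->backup_state && ipvs->sysctl_backup_only) ||
	       !ipvs->enable;
}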
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c68198bf9128..9e2d1cccd1eb 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1808,6 +1808,12 @@ static struct ctl_table vs_vars[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "backup_only",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 #ifdef CONFIG_IP_VS_DEBUG
 	{
 		.procname	= "debug_level",
@@ -3741,6 +3747,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
 	tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
 	ipvs->sysctl_pmtu_disc = 1;
 	tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
+	tbl[idx++].data = &ipvs->sysctl_backup_only;
 
 
 	ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index ae8ec6f27688..cd1d7298f7ba 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -906,7 +906,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
 	sctp_chunkhdr_t _sctpch, *sch;
 	unsigned char chunk_type;
 	int event, next_state;
-	int ihl;
+	int ihl, cofs;
 
 #ifdef CONFIG_IP_VS_IPV6
 	ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
@@ -914,8 +914,8 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
 	ihl = ip_hdrlen(skb);
 #endif
 
-	sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t),
-				 sizeof(_sctpch), &_sctpch);
+	cofs = ihl + sizeof(sctp_sctphdr_t);
+	sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch);
 	if (sch == NULL)
 		return;
 
@@ -933,10 +933,12 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
 	 */
 	if ((sch->type == SCTP_CID_COOKIE_ECHO) ||
 	    (sch->type == SCTP_CID_COOKIE_ACK)) {
-		sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) +
-				sch->length), sizeof(_sctpch), &_sctpch);
-		if (sch) {
-			if (sch->type == SCTP_CID_ABORT)
+		int clen = ntohs(sch->length);
+
+		if (clen >= sizeof(sctp_chunkhdr_t)) {
+			sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4),
+						 sizeof(_sctpch), &_sctpch);
+			if (sch && sch->type == SCTP_CID_ABORT)
 				chunk_type = sch->type;
 		}
 	}
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 432f95780003..ba65b2041eb4 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -969,6 +969,10 @@ static int __init nf_conntrack_proto_dccp_init(void)
 {
 	int ret;
 
+	ret = register_pernet_subsys(&dccp_net_ops);
+	if (ret < 0)
+		goto out_pernet;
+
 	ret = nf_ct_l4proto_register(&dccp_proto4);
 	if (ret < 0)
 		goto out_dccp4;
@@ -977,16 +981,12 @@ static int __init nf_conntrack_proto_dccp_init(void)
 	if (ret < 0)
 		goto out_dccp6;
 
-	ret = register_pernet_subsys(&dccp_net_ops);
-	if (ret < 0)
-		goto out_pernet;
-
 	return 0;
-out_pernet:
-	nf_ct_l4proto_unregister(&dccp_proto6);
 out_dccp6:
 	nf_ct_l4proto_unregister(&dccp_proto4);
 out_dccp4:
+	unregister_pernet_subsys(&dccp_net_ops);
+out_pernet:
 	return ret;
 }
 
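
[Editor's note] This hunk and the matching GRE, SCTP and UDPlite hunks below make the same correction: register_pernet_subsys() must run before nf_ct_l4proto_register(), so the per-netns state exists before the protocol trackers can be invoked, and the error path must unwind in exact reverse order. A generic sketch of the goto-unwind idiom these four init functions now follow (placeholder functions; one registration is made to fail so the unwind path is exercised):

#include <stdio.h>

static int register_pernet(void)    { return 0; }
static int register_proto4(void)    { return 0; }
static int register_proto6(void)    { return -1; } /* simulate failure */
static void unregister_proto4(void) { }
static void unregister_pernet(void) { }

static int proto_init(void)
{
	int ret;

	ret = register_pernet();        /* dependency comes up first */
	if (ret < 0)
		goto out_pernet;

	ret = register_proto4();
	if (ret < 0)
		goto out_proto4;

	ret = register_proto6();
	if (ret < 0)
		goto out_proto6;

	return 0;

out_proto6:
	unregister_proto4();            /* teardown in reverse order */
out_proto4:
	unregister_pernet();
out_pernet:
	return ret;
}

int main(void)
{
	printf("init: %d\n", proto_init());
	return 0;
}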
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index bd7d01d9c7e7..155ce9f8a0db 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -420,18 +420,18 @@ static int __init nf_ct_proto_gre_init(void)
 {
 	int ret;
 
-	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4);
-	if (ret < 0)
-		goto out_gre4;
-
 	ret = register_pernet_subsys(&proto_gre_net_ops);
 	if (ret < 0)
 		goto out_pernet;
 
+	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4);
+	if (ret < 0)
+		goto out_gre4;
+
 	return 0;
-out_pernet:
-	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4);
 out_gre4:
+	unregister_pernet_subsys(&proto_gre_net_ops);
+out_pernet:
 	return ret;
 }
 
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 480f616d5936..ec83536def9a 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -888,6 +888,10 @@ static int __init nf_conntrack_proto_sctp_init(void)
 {
 	int ret;
 
+	ret = register_pernet_subsys(&sctp_net_ops);
+	if (ret < 0)
+		goto out_pernet;
+
 	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4);
 	if (ret < 0)
 		goto out_sctp4;
@@ -896,16 +900,12 @@ static int __init nf_conntrack_proto_sctp_init(void)
 	if (ret < 0)
 		goto out_sctp6;
 
-	ret = register_pernet_subsys(&sctp_net_ops);
-	if (ret < 0)
-		goto out_pernet;
-
 	return 0;
-out_pernet:
-	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp6);
 out_sctp6:
 	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
 out_sctp4:
+	unregister_pernet_subsys(&sctp_net_ops);
+out_pernet:
 	return ret;
 }
 
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 157489581c31..ca969f6273f7 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -371,6 +371,10 @@ static int __init nf_conntrack_proto_udplite_init(void)
 {
 	int ret;
 
+	ret = register_pernet_subsys(&udplite_net_ops);
+	if (ret < 0)
+		goto out_pernet;
+
 	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4);
 	if (ret < 0)
 		goto out_udplite4;
@@ -379,16 +383,12 @@ static int __init nf_conntrack_proto_udplite_init(void)
 	if (ret < 0)
 		goto out_udplite6;
 
-	ret = register_pernet_subsys(&udplite_net_ops);
-	if (ret < 0)
-		goto out_pernet;
-
 	return 0;
-out_pernet:
-	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6);
 out_udplite6:
 	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
 out_udplite4:
+	unregister_pernet_subsys(&udplite_net_ops);
+out_pernet:
 	return ret;
 }
 
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 858fd52c1040..1cb48540f86a 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -112,7 +112,7 @@ instance_create(u_int16_t queue_num, int portid)
 	inst->queue_num = queue_num;
 	inst->peer_portid = portid;
 	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
-	inst->copy_range = 0xfffff;
+	inst->copy_range = 0xffff;
 	inst->copy_mode = NFQNL_COPY_NONE;
 	spin_lock_init(&inst->lock);
 	INIT_LIST_HEAD(&inst->queue_list);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index f2aabb6f4105..5a55be3f17a5 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -142,6 +142,7 @@ int genl_register_mc_group(struct genl_family *family,
 	int err = 0;
 
 	BUG_ON(grp->name[0] == '\0');
+	BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL);
 
 	genl_lock();
 
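
[Editor's note] The added BUG_ON asserts that a multicast group name is not only non-empty but also NUL-terminated somewhere inside its fixed-size buffer, so later string handling cannot run off the end. A userspace sketch of the same two checks (GRP_NAMSIZ is a stand-in for GENL_NAMSIZ, which I believe is 16):

#include <string.h>
#include <stdbool.h>

#define GRP_NAMSIZ 16

static bool grp_name_valid(const char name[GRP_NAMSIZ])
{
	if (name[0] == '\0')
		return false;                        /* empty name */
	return memchr(name, '\0', GRP_NAMSIZ) != NULL; /* must terminate */
}

In validation code a boolean reject is usually preferable to a BUG_ON; the kernel uses BUG_ON here because group registration happens at module init with compile-time names.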
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 7f8266dd14cb..b530afadd76c 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -68,7 +68,8 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)
 	}
 }
 
-static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
+static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen,
+				    int err)
 {
 	struct sock *sk;
 	struct hlist_node *tmp;
@@ -100,7 +101,10 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
 
 			nfc_llcp_accept_unlink(accept_sk);
 
+			if (err)
+				accept_sk->sk_err = err;
 			accept_sk->sk_state = LLCP_CLOSED;
+			accept_sk->sk_state_change(sk);
 
 			bh_unlock_sock(accept_sk);
 
@@ -123,7 +127,10 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
 			continue;
 		}
 
+		if (err)
+			sk->sk_err = err;
 		sk->sk_state = LLCP_CLOSED;
+		sk->sk_state_change(sk);
 
 		bh_unlock_sock(sk);
 
@@ -133,6 +140,36 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
 	}
 
 	write_unlock(&local->sockets.lock);
+
+	/*
+	 * If we want to keep the listening sockets alive,
+	 * we don't touch the RAW ones.
+	 */
+	if (listen == true)
+		return;
+
+	write_lock(&local->raw_sockets.lock);
+
+	sk_for_each_safe(sk, tmp, &local->raw_sockets.head) {
+		llcp_sock = nfc_llcp_sock(sk);
+
+		bh_lock_sock(sk);
+
+		nfc_llcp_socket_purge(llcp_sock);
+
+		if (err)
+			sk->sk_err = err;
+		sk->sk_state = LLCP_CLOSED;
+		sk->sk_state_change(sk);
+
+		bh_unlock_sock(sk);
+
+		sock_orphan(sk);
+
+		sk_del_node_init(sk);
+	}
+
+	write_unlock(&local->raw_sockets.lock);
 }
 
 struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
@@ -142,20 +179,25 @@ struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
 	return local;
 }
 
-static void local_release(struct kref *ref)
+static void local_cleanup(struct nfc_llcp_local *local, bool listen)
 {
-	struct nfc_llcp_local *local;
-
-	local = container_of(ref, struct nfc_llcp_local, ref);
-
-	list_del(&local->list);
-	nfc_llcp_socket_release(local, false);
+	nfc_llcp_socket_release(local, listen, ENXIO);
 	del_timer_sync(&local->link_timer);
 	skb_queue_purge(&local->tx_queue);
 	cancel_work_sync(&local->tx_work);
 	cancel_work_sync(&local->rx_work);
 	cancel_work_sync(&local->timeout_work);
 	kfree_skb(local->rx_pending);
+}
+
+static void local_release(struct kref *ref)
+{
+	struct nfc_llcp_local *local;
+
+	local = container_of(ref, struct nfc_llcp_local, ref);
+
+	list_del(&local->list);
+	local_cleanup(local, false);
 	kfree(local);
 }
 
@@ -1348,7 +1390,7 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
 		return;
 
 	/* Close and purge all existing sockets */
-	nfc_llcp_socket_release(local, true);
+	nfc_llcp_socket_release(local, true, 0);
 }
 
 void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
@@ -1427,6 +1469,8 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev)
 		return;
 	}
 
+	local_cleanup(local, false);
+
 	nfc_llcp_local_put(local);
 }
 
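
[Editor's note] The llcp changes above factor the teardown into local_cleanup() and make socket release propagate an error (ENXIO) plus a state-change notification, so callers blocked on these sockets wake with an error instead of hanging when the adapter disappears. A sketch of the per-socket step (a stand-in struct, not struct sock; the callback is assumed non-NULL as in the kernel):

struct lsock {
	int sk_err;
	int sk_state;                       /* 0 stands in for LLCP_CLOSED */
	void (*sk_state_change)(struct lsock *sk);
};

static void release_one(struct lsock *sk, int err)
{
	if (err)
		sk->sk_err = err;           /* e.g. ENXIO: device gone */
	sk->sk_state = 0;
	sk->sk_state_change(sk);            /* wake anyone sleeping on it */
}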
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 5332751943a9..5c7cdf3f2a83 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -278,6 +278,8 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
 
 			pr_debug("Returning sk state %d\n", sk->sk_state);
 
+			sk_acceptq_removed(parent);
+
 			return sk;
 		}
 
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index ac2defeeba83..d4d5363c7ba7 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -58,7 +58,7 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
 
 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
-					+ ETH_HLEN, VLAN_HLEN, 0));
+					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
 
 	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
 	*current_tci = vhdr->h_vlan_TCI;
@@ -115,7 +115,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla
 
 		if (skb->ip_summed == CHECKSUM_COMPLETE)
 			skb->csum = csum_add(skb->csum, csum_partial(skb->data
-					+ ETH_HLEN, VLAN_HLEN, 0));
+					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
 
 	}
 	__vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
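
[Editor's note] Both hunks fix the same off-by-two: the 802.1Q tag begins right after the destination and source MAC addresses, at offset 2 * ETH_ALEN = 12, whereas ETH_HLEN = 14 already includes the 2-byte EtherType and therefore points past the first half of the tag, so the wrong four bytes were folded into the CHECKSUM_COMPLETE value. A tiny self-checking sketch of the offsets involved:

#include <assert.h>

#define ETH_ALEN  6   /* bytes per MAC address */
#define ETH_HLEN  14  /* dst MAC + src MAC + EtherType */
#define VLAN_HLEN 4   /* TPID + TCI */

int main(void)
{
	int tag_off = 2 * ETH_ALEN;        /* correct start of the tag: 12 */

	assert(tag_off + VLAN_HLEN == 16); /* tag covers bytes 12..15 */
	assert(tag_off != ETH_HLEN);       /* the old code's off-by-two */
	return 0;
}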
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e87a26506dba..a4b724708a1a 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -394,6 +394,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
 
 	skb_copy_and_csum_dev(skb, nla_data(nla));
 
+	genlmsg_end(user_skb, upcall);
 	err = genlmsg_unicast(net, user_skb, upcall_info->portid);
 
 out:
@@ -1690,6 +1691,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	if (IS_ERR(vport))
 		goto exit_unlock;
 
+	err = 0;
 	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
 					 OVS_VPORT_CMD_NEW);
 	if (IS_ERR(reply)) {
@@ -1771,6 +1773,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	if (IS_ERR(reply))
 		goto exit_unlock;
 
+	err = 0;
 	ovs_dp_detach_port(vport);
 
 	genl_notify(reply, genl_info_net(info), info->snd_portid,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 20605ecf100b..fe0e4215c73d 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -482,7 +482,11 @@ static __be16 parse_ethertype(struct sk_buff *skb)
 		return htons(ETH_P_802_2);
 
 	__skb_pull(skb, sizeof(struct llc_snap_hdr));
-	return llc->ethertype;
+
+	if (ntohs(llc->ethertype) >= 1536)
+		return llc->ethertype;
+
+	return htons(ETH_P_802_2);
 }
 
 static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
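
[Editor's note] The parse_ethertype() fix encodes the classic Ethernet II / 802.3 disambiguation: a 16-bit value of 1536 (0x0600) or more in the type/length slot is a real EtherType, while smaller values are 802.3 frame lengths and must not be surfaced as a protocol. A sketch of that rule in isolation (ETH_P_802_2_STUB stands in for the kernel's ETH_P_802_2, which I believe is 0x0004):

#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_802_2_STUB 0x0004

/* takes and returns the field in network byte order, like the skb code */
static uint16_t classify_snap_type(uint16_t ethertype_be)
{
	if (ntohs(ethertype_be) >= 1536)
		return ethertype_be;            /* genuine EtherType */
	return htons(ETH_P_802_2_STUB);         /* length field: raw 802.2 */
}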
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 670cbc3518de..2130d61c384a 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -43,8 +43,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 
 	/* Make our own copy of the packet.  Otherwise we will mangle the
 	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
-	 * (No one comes after us, since we tell handle_bridge() that we took
-	 * the packet.) */
+	 */
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return;
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index ba717cc038b3..f6b8132ce4cb 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -325,8 +325,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
  * @skb: skb that was received
  *
  * Must be called with rcu_read_lock.  The packet cannot be shared and
- * skb->data should point to the Ethernet header.  The caller must have already
- * called compute_ip_summed() to initialize the checksumming fields.
+ * skb->data should point to the Ethernet header.
  */
 void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
 {
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 43cd0dd9149d..d2709e2b7be6 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1079,7 +1079,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
 			transports) {
 
 		if (transport == active)
-			break;
+			continue;
 		list_for_each_entry(chunk, &transport->transmitted,
 				    transmitted_list) {
 			if (key == chunk->subh.data_hdr->tsn) {
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 5131fcfedb03..de1a0138317f 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2082,7 +2082,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
 	}
 
 	/* Delete the tempory new association. */
-	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
 	/* Restore association pointer to provide SCTP command interpeter
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index fb20f25ddec9..f8529fc8e542 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -180,6 +180,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
 	task->tk_waitqueue = queue;
 	queue->qlen++;
+	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
+	smp_wmb();
 	rpc_set_queued(task);
 
 	dprintk("RPC: %5u added to queue %p \"%s\"\n",
@@ -430,8 +432,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
  */
 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
-	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
-		__rpc_do_wake_up_task(queue, task);
+	if (RPC_IS_QUEUED(task)) {
+		smp_rmb();
+		if (task->tk_waitqueue == queue)
+			__rpc_do_wake_up_task(queue, task);
+	}
 }
 
 /*
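
[Editor's note] The two sched.c hunks form a barrier pair: the enqueue side publishes tk_waitqueue before setting the QUEUED flag (store, smp_wmb, set), and the wake-up side tests the flag before reading tk_waitqueue (test, smp_rmb, read), so a waker that sees the flag is guaranteed to see the queue pointer. Release/acquire atomics express the same pairing in portable C11 (a sketch, not the kernel types):

#include <stdatomic.h>
#include <stdbool.h>

struct task {
	void *waitqueue;      /* published before the flag is set */
	atomic_bool queued;
};

static void task_enqueue(struct task *t, void *queue)
{
	t->waitqueue = queue;
	/* release: orders the waitqueue store before the flag store,
	 * playing the role of smp_wmb() before rpc_set_queued() */
	atomic_store_explicit(&t->queued, true, memory_order_release);
}

static bool task_is_on_queue(struct task *t, void *queue)
{
	/* acquire: pairs with the release above, playing the role of
	 * the smp_rmb() after the RPC_IS_QUEUED() test */
	if (!atomic_load_explicit(&t->queued, memory_order_acquire))
		return false;
	return t->waitqueue == queue;
}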
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 51be64f163ec..971282b6f6a3 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -382,7 +382,7 @@ static void unix_sock_destructor(struct sock *sk)
 #endif
 }
 
-static int unix_release_sock(struct sock *sk, int embrion)
+static void unix_release_sock(struct sock *sk, int embrion)
 {
 	struct unix_sock *u = unix_sk(sk);
 	struct path path;
@@ -451,8 +451,6 @@ static int unix_release_sock(struct sock *sk, int embrion)
 
 	if (unix_tot_inflight)
 		unix_gc();		/* Garbage collect fds */
-
-	return 0;
 }
 
 static void init_peercred(struct sock *sk)
@@ -699,9 +697,10 @@ static int unix_release(struct socket *sock)
 	if (!sk)
 		return 0;
 
+	unix_release_sock(sk, 0);
 	sock->sk = NULL;
 
-	return unix_release_sock(sk, 0);
+	return 0;
 }
 
 static int unix_autobind(struct socket *sock)
@@ -1413,8 +1412,8 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
 	if (UNIXCB(skb).cred)
 		return;
 	if (test_bit(SOCK_PASSCRED, &sock->flags) ||
-	    !other->sk_socket ||
-	    test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
+	    (other->sk_socket &&
+	     test_bit(SOCK_PASSCRED, &other->sk_socket->flags))) {
 		UNIXCB(skb).pid  = get_pid(task_tgid(current));
 		UNIXCB(skb).cred = get_current_cred();
 	}
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 48665ecd1197..8ab295154517 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -310,7 +310,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
 
 	if (old_ctx) {
 		new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len,
-				  GFP_KERNEL);
+				  GFP_ATOMIC);
 		if (!new_ctx)
 			return -ENOMEM;
 
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 23414b93771f..13c88fbcf037 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -347,10 +347,8 @@ int yama_ptrace_traceme(struct task_struct *parent)
 	/* Only disallow PTRACE_TRACEME on more aggressive settings. */
 	switch (ptrace_scope) {
 	case YAMA_SCOPE_CAPABILITY:
-		rcu_read_lock();
-		if (!ns_capable(__task_cred(parent)->user_ns, CAP_SYS_PTRACE))
+		if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
 			rc = -EPERM;
-		rcu_read_unlock();
 		break;
 	case YAMA_SCOPE_NO_ATTACH:
 		rc = -EPERM;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index a9ebcf9e3710..ecdf30eb5879 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -3144,7 +3144,7 @@ static unsigned int convert_to_spdif_status(unsigned short val)
 	if (val & AC_DIG1_PROFESSIONAL)
 		sbits |= IEC958_AES0_PROFESSIONAL;
 	if (sbits & IEC958_AES0_PROFESSIONAL) {
-		if (sbits & AC_DIG1_EMPHASIS)
+		if (val & AC_DIG1_EMPHASIS)
 			sbits |= IEC958_AES0_PRO_EMPHASIS_5015;
 	} else {
 		if (val & AC_DIG1_EMPHASIS)
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 78897d05d80f..43c2ea539561 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -995,6 +995,8 @@ enum {
 	BAD_NO_EXTRA_SURR_DAC = 0x101,
 	/* Primary DAC shared with main surrounds */
 	BAD_SHARED_SURROUND = 0x100,
+	/* No independent HP possible */
+	BAD_NO_INDEP_HP = 0x40,
 	/* Primary DAC shared with main CLFE */
 	BAD_SHARED_CLFE = 0x10,
 	/* Primary DAC shared with extra surrounds */
@@ -1392,6 +1394,43 @@ static int check_aamix_out_path(struct hda_codec *codec, int path_idx)
 	return snd_hda_get_path_idx(codec, path);
 }
 
+/* check whether the independent HP is available with the current config */
+static bool indep_hp_possible(struct hda_codec *codec)
+{
+	struct hda_gen_spec *spec = codec->spec;
+	struct auto_pin_cfg *cfg = &spec->autocfg;
+	struct nid_path *path;
+	int i, idx;
+
+	if (cfg->line_out_type == AUTO_PIN_HP_OUT)
+		idx = spec->out_paths[0];
+	else
+		idx = spec->hp_paths[0];
+	path = snd_hda_get_path_from_idx(codec, idx);
+	if (!path)
+		return false;
+
+	/* assume no path conflicts unless aamix is involved */
+	if (!spec->mixer_nid || !is_nid_contained(path, spec->mixer_nid))
+		return true;
+
+	/* check whether output paths contain aamix */
+	for (i = 0; i < cfg->line_outs; i++) {
+		if (spec->out_paths[i] == idx)
+			break;
+		path = snd_hda_get_path_from_idx(codec, spec->out_paths[i]);
+		if (path && is_nid_contained(path, spec->mixer_nid))
+			return false;
+	}
+	for (i = 0; i < cfg->speaker_outs; i++) {
+		path = snd_hda_get_path_from_idx(codec, spec->speaker_paths[i]);
+		if (path && is_nid_contained(path, spec->mixer_nid))
+			return false;
+	}
+
+	return true;
+}
+
 /* fill the empty entries in the dac array for speaker/hp with the
  * shared dac pointed by the paths
  */
@@ -1545,6 +1584,9 @@ static int fill_and_eval_dacs(struct hda_codec *codec,
 			badness += BAD_MULTI_IO;
 	}
 
+	if (spec->indep_hp && !indep_hp_possible(codec))
+		badness += BAD_NO_INDEP_HP;
+
 	/* re-fill the shared DAC for speaker / headphone */
 	if (cfg->line_out_type != AUTO_PIN_HP_OUT)
 		refill_shared_dacs(codec, cfg->hp_outs,
@@ -1758,6 +1800,10 @@ static int parse_output_paths(struct hda_codec *codec)
 					  cfg->speaker_pins, val);
 	}
 
+	/* clear indep_hp flag if not available */
+	if (spec->indep_hp && !indep_hp_possible(codec))
+		spec->indep_hp = 0;
+
 	kfree(best_cfg);
 	return 0;
 }
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4cea6bb6fade..418bfc0eb0a3 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -415,6 +415,8 @@ struct azx_dev {
 	unsigned int opened :1;
 	unsigned int running :1;
 	unsigned int irq_pending :1;
+	unsigned int prepared:1;
+	unsigned int locked:1;
 	/*
 	 * For VIA:
 	 * A flag to ensure DMA position is 0
@@ -426,8 +428,25 @@ struct azx_dev {
 
 	struct timecounter  azx_tc;
 	struct cyclecounter azx_cc;
+
+#ifdef CONFIG_SND_HDA_DSP_LOADER
+	struct mutex dsp_mutex;
+#endif
 };
 
+/* DSP lock helpers */
+#ifdef CONFIG_SND_HDA_DSP_LOADER
+#define dsp_lock_init(dev)	mutex_init(&(dev)->dsp_mutex)
+#define dsp_lock(dev)		mutex_lock(&(dev)->dsp_mutex)
+#define dsp_unlock(dev)		mutex_unlock(&(dev)->dsp_mutex)
+#define dsp_is_locked(dev)	((dev)->locked)
+#else
+#define dsp_lock_init(dev)	do {} while (0)
+#define dsp_lock(dev)		do {} while (0)
+#define dsp_unlock(dev)		do {} while (0)
+#define dsp_is_locked(dev)	0
+#endif
+
 /* CORB/RIRB */
 struct azx_rb {
 	u32 *buf;		/* CORB/RIRB buffer
@@ -527,6 +546,10 @@ struct azx {
 
 	/* card list (for power_save trigger) */
 	struct list_head list;
+
+#ifdef CONFIG_SND_HDA_DSP_LOADER
+	struct azx_dev saved_azx_dev;
+#endif
 };
 
 #define CREATE_TRACE_POINTS
@@ -1793,15 +1816,25 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
 		dev = chip->capture_index_offset;
 		nums = chip->capture_streams;
 	}
-	for (i = 0; i < nums; i++, dev++)
-		if (!chip->azx_dev[dev].opened) {
-			res = &chip->azx_dev[dev];
-			if (res->assigned_key == key)
-				break;
+	for (i = 0; i < nums; i++, dev++) {
+		struct azx_dev *azx_dev = &chip->azx_dev[dev];
+		dsp_lock(azx_dev);
+		if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
+			res = azx_dev;
+			if (res->assigned_key == key) {
+				res->opened = 1;
+				res->assigned_key = key;
+				dsp_unlock(azx_dev);
+				return azx_dev;
+			}
 		}
+		dsp_unlock(azx_dev);
+	}
 	if (res) {
+		dsp_lock(res);
 		res->opened = 1;
 		res->assigned_key = key;
+		dsp_unlock(res);
 	}
 	return res;
 }
@@ -2009,6 +2042,12 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
 	struct azx_dev *azx_dev = get_azx_dev(substream);
 	int ret;
 
+	dsp_lock(azx_dev);
+	if (dsp_is_locked(azx_dev)) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
 	mark_runtime_wc(chip, azx_dev, substream, false);
 	azx_dev->bufsize = 0;
 	azx_dev->period_bytes = 0;
@@ -2016,8 +2055,10 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
 	ret = snd_pcm_lib_malloc_pages(substream,
 				       params_buffer_bytes(hw_params));
 	if (ret < 0)
-		return ret;
+		goto unlock;
 	mark_runtime_wc(chip, azx_dev, substream, true);
+ unlock:
+	dsp_unlock(azx_dev);
 	return ret;
 }
 
@@ -2029,16 +2070,21 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
 	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
 
 	/* reset BDL address */
-	azx_sd_writel(azx_dev, SD_BDLPL, 0);
-	azx_sd_writel(azx_dev, SD_BDLPU, 0);
-	azx_sd_writel(azx_dev, SD_CTL, 0);
-	azx_dev->bufsize = 0;
-	azx_dev->period_bytes = 0;
-	azx_dev->format_val = 0;
+	dsp_lock(azx_dev);
+	if (!dsp_is_locked(azx_dev)) {
+		azx_sd_writel(azx_dev, SD_BDLPL, 0);
+		azx_sd_writel(azx_dev, SD_BDLPU, 0);
+		azx_sd_writel(azx_dev, SD_CTL, 0);
+		azx_dev->bufsize = 0;
+		azx_dev->period_bytes = 0;
+		azx_dev->format_val = 0;
+	}
 
 	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
 
 	mark_runtime_wc(chip, azx_dev, substream, false);
+	azx_dev->prepared = 0;
+	dsp_unlock(azx_dev);
 	return snd_pcm_lib_free_pages(substream);
 }
 
@@ -2055,6 +2101,12 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
 		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
 	unsigned short ctls = spdif ? spdif->ctls : 0;
 
+	dsp_lock(azx_dev);
+	if (dsp_is_locked(azx_dev)) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
 	azx_stream_reset(chip, azx_dev);
 	format_val = snd_hda_calc_stream_format(runtime->rate,
 						runtime->channels,
@@ -2065,7 +2117,8 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
 		snd_printk(KERN_ERR SFX
 			   "%s: invalid format_val, rate=%d, ch=%d, format=%d\n",
 			   pci_name(chip->pci), runtime->rate, runtime->channels, runtime->format);
-		return -EINVAL;
+		err = -EINVAL;
+		goto unlock;
 	}
 
 	bufsize = snd_pcm_lib_buffer_bytes(substream);
@@ -2084,7 +2137,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
 		azx_dev->no_period_wakeup = runtime->no_period_wakeup;
 		err = azx_setup_periods(chip, substream, azx_dev);
 		if (err < 0)
-			return err;
+			goto unlock;
 	}
 
 	/* wallclk has 24Mhz clock source */
@@ -2101,8 +2154,14 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
 	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
 	    stream_tag > chip->capture_streams)
 		stream_tag -= chip->capture_streams;
-	return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
+	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
 				     azx_dev->format_val, substream);
+
+ unlock:
+	if (!err)
+		azx_dev->prepared = 1;
+	dsp_unlock(azx_dev);
+	return err;
 }
 
 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
@@ -2117,6 +2176,9 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 	azx_dev = get_azx_dev(substream);
 	trace_azx_pcm_trigger(chip, azx_dev, cmd);
 
+	if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
+		return -EPIPE;
+
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 		rstart = 1;
@@ -2621,17 +2683,27 @@ static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
 	struct azx_dev *azx_dev;
 	int err;
 
-	if (snd_hda_lock_devices(bus))
-		return -EBUSY;
+	azx_dev = azx_get_dsp_loader_dev(chip);
+
+	dsp_lock(azx_dev);
+	spin_lock_irq(&chip->reg_lock);
+	if (azx_dev->running || azx_dev->locked) {
+		spin_unlock_irq(&chip->reg_lock);
+		err = -EBUSY;
+		goto unlock;
+	}
+	azx_dev->prepared = 0;
+	chip->saved_azx_dev = *azx_dev;
+	azx_dev->locked = 1;
+	spin_unlock_irq(&chip->reg_lock);
 
 	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG,
 				  snd_dma_pci_data(chip->pci),
 				  byte_size, bufp);
 	if (err < 0)
-		goto unlock;
+		goto err_alloc;
 
 	mark_pages_wc(chip, bufp, true);
-	azx_dev = azx_get_dsp_loader_dev(chip);
 	azx_dev->bufsize = byte_size;
 	azx_dev->period_bytes = byte_size;
 	azx_dev->format_val = format;
@@ -2649,13 +2721,20 @@ static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
 		goto error;
 
 	azx_setup_controller(chip, azx_dev);
+	dsp_unlock(azx_dev);
 	return azx_dev->stream_tag;
 
  error:
 	mark_pages_wc(chip, bufp, false);
 	snd_dma_free_pages(bufp);
-unlock:
-	snd_hda_unlock_devices(bus);
+ err_alloc:
+	spin_lock_irq(&chip->reg_lock);
+	if (azx_dev->opened)
+		*azx_dev = chip->saved_azx_dev;
+	azx_dev->locked = 0;
+	spin_unlock_irq(&chip->reg_lock);
+ unlock:
+	dsp_unlock(azx_dev);
 	return err;
 }
 
@@ -2677,9 +2756,10 @@ static void azx_load_dsp_cleanup(struct hda_bus *bus,
2677 struct azx *chip = bus->private_data; 2756 struct azx *chip = bus->private_data;
2678 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip); 2757 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
2679 2758
2680 if (!dmab->area) 2759 if (!dmab->area || !azx_dev->locked)
2681 return; 2760 return;
2682 2761
2762 dsp_lock(azx_dev);
2683 /* reset BDL address */ 2763 /* reset BDL address */
2684 azx_sd_writel(azx_dev, SD_BDLPL, 0); 2764 azx_sd_writel(azx_dev, SD_BDLPL, 0);
2685 azx_sd_writel(azx_dev, SD_BDLPU, 0); 2765 azx_sd_writel(azx_dev, SD_BDLPU, 0);
@@ -2692,7 +2772,12 @@ static void azx_load_dsp_cleanup(struct hda_bus *bus,
2692 snd_dma_free_pages(dmab); 2772 snd_dma_free_pages(dmab);
2693 dmab->area = NULL; 2773 dmab->area = NULL;
2694 2774
2695 snd_hda_unlock_devices(bus); 2775 spin_lock_irq(&chip->reg_lock);
2776 if (azx_dev->opened)
2777 *azx_dev = chip->saved_azx_dev;
2778 azx_dev->locked = 0;
2779 spin_unlock_irq(&chip->reg_lock);
2780 dsp_unlock(azx_dev);
2696} 2781}
2697#endif /* CONFIG_SND_HDA_DSP_LOADER */ 2782#endif /* CONFIG_SND_HDA_DSP_LOADER */
2698 2783
@@ -3481,6 +3566,7 @@ static int azx_first_init(struct azx *chip)
3481 } 3566 }
3482 3567
3483 for (i = 0; i < chip->num_streams; i++) { 3568 for (i = 0; i < chip->num_streams; i++) {
3569 dsp_lock_init(&chip->azx_dev[i]);
3484 /* allocate memory for the BDL for each stream */ 3570 /* allocate memory for the BDL for each stream */
3485 err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, 3571 err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
3486 snd_dma_pci_data(chip->pci), 3572 snd_dma_pci_data(chip->pci),
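
The hda_intel.c hunks above replace the coarse snd_hda_lock_devices() bus lock with per-stream DSP-loader state: azx_load_dsp_prepare() snapshots the victim stream into chip->saved_azx_dev, sets azx_dev->locked, and restores the snapshot on the error path and in azx_load_dsp_cleanup(), while azx_pcm_trigger() now refuses with -EPIPE any stream that is locked or not prepared. Below is a minimal user-space sketch of that takeover/restore pattern; the pthread mutex and the reduced state struct are illustrative stand-ins for the driver's dsp_lock()/reg_lock and struct azx_dev, not the actual implementation.

/* Illustrative sketch only: a stream is "hijacked" for firmware
 * loading by snapshotting its state, marking it locked, and
 * restoring the snapshot on release. */
#include <errno.h>
#include <pthread.h>

struct stream_state {
	int locked;      /* taken over by the DSP loader */
	int prepared;    /* set by prepare, required by trigger */
	int running;
	int opened;
};

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static struct stream_state saved_state;  /* chip->saved_azx_dev */

static int dsp_takeover(struct stream_state *s)
{
	pthread_mutex_lock(&reg_lock);
	if (s->running || s->locked) {
		pthread_mutex_unlock(&reg_lock);
		return -EBUSY;            /* same bail-out as the diff */
	}
	s->prepared = 0;
	saved_state = *s;                 /* snapshot before hijacking */
	s->locked = 1;
	pthread_mutex_unlock(&reg_lock);
	return 0;
}

static void dsp_release(struct stream_state *s)
{
	pthread_mutex_lock(&reg_lock);
	if (s->opened)                    /* PCM was reopened meanwhile, */
		*s = saved_state;         /* so hand its state back      */
	s->locked = 0;
	pthread_mutex_unlock(&reg_lock);
}

/* Trigger-side guard mirroring the new check in azx_pcm_trigger(). */
static int can_trigger(const struct stream_state *s)
{
	return (!s->locked && s->prepared) ? 0 : -EPIPE;
}
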
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 60d08f669f0c..0d9c58f13560 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -168,10 +168,10 @@ static void cs_automute(struct hda_codec *codec)
 	snd_hda_gen_update_outputs(codec);
 
 	if (spec->gpio_eapd_hp) {
-		unsigned int gpio = spec->gen.hp_jack_present ?
+		spec->gpio_data = spec->gen.hp_jack_present ?
 			spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
 		snd_hda_codec_write(codec, 0x01, 0,
-				    AC_VERB_SET_GPIO_DATA, gpio);
+				    AC_VERB_SET_GPIO_DATA, spec->gpio_data);
 	}
 }
 
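
The patch_cirrus.c change stores the EAPD GPIO value in spec->gpio_data instead of a throwaway local, so whatever was last written to the codec stays available to other paths that consult spec->gpio_data (presumably init or resume; those paths are not visible in this hunk). A hedged sketch of the cache-the-last-write idiom; codec_write_gpio() is a hypothetical stand-in for snd_hda_codec_write():

/* Cache the value actually written to the hardware so later code
 * can replay it.  Names here are illustrative, not the driver's. */
struct cs_spec_sketch {
	unsigned int gpio_eapd_hp;
	unsigned int gpio_eapd_speaker;
	unsigned int gpio_data;          /* survives the call */
	int hp_jack_present;
};

static void codec_write_gpio(unsigned int val)
{
	(void)val;                       /* hardware write elided */
}

static void automute(struct cs_spec_sketch *spec)
{
	spec->gpio_data = spec->hp_jack_present ?
		spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
	codec_write_gpio(spec->gpio_data);
}
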
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 941bf6c766ec..2a89d1eefeb6 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -1142,7 +1142,7 @@ static int patch_cxt5045(struct hda_codec *codec)
 	}
 
 	if (spec->beep_amp)
-		snd_hda_attach_beep_device(codec, spec->beep_amp);
+		snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
 
 	return 0;
 }
@@ -1921,7 +1921,7 @@ static int patch_cxt5051(struct hda_codec *codec)
 	}
 
 	if (spec->beep_amp)
-		snd_hda_attach_beep_device(codec, spec->beep_amp);
+		snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
 
 	return 0;
 }
@@ -3099,7 +3099,7 @@ static int patch_cxt5066(struct hda_codec *codec)
 	}
 
 	if (spec->beep_amp)
-		snd_hda_attach_beep_device(codec, spec->beep_amp);
+		snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
 
 	return 0;
 }
@@ -3191,11 +3191,17 @@ static int cx_auto_build_controls(struct hda_codec *codec)
 	return 0;
 }
 
+static void cx_auto_free(struct hda_codec *codec)
+{
+	snd_hda_detach_beep_device(codec);
+	snd_hda_gen_free(codec);
+}
+
 static const struct hda_codec_ops cx_auto_patch_ops = {
 	.build_controls = cx_auto_build_controls,
 	.build_pcms = snd_hda_gen_build_pcms,
 	.init = snd_hda_gen_init,
-	.free = snd_hda_gen_free,
+	.free = cx_auto_free,
 	.unsol_event = snd_hda_jack_unsol_event,
 #ifdef CONFIG_PM
 	.check_power_status = snd_hda_gen_check_power_status,
@@ -3391,7 +3397,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
 
 	codec->patch_ops = cx_auto_patch_ops;
 	if (spec->beep_amp)
-		snd_hda_attach_beep_device(codec, spec->beep_amp);
+		snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
 
 	/* Some laptops with Conexant chips show stalls in S3 resume,
 	 * which falls into the single-cmd mode.
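
Two distinct fixes sit in the patch_conexant.c hunks: spec->beep_amp evidently packs more than the widget NID, so only the NID part is handed to snd_hda_attach_beep_device() via get_amp_nid_(); and the new cx_auto_free() detaches the beep device before the generic teardown, so the attach now has a matching detach. A minimal sketch of that attach/detach pairing follows; the names are illustrative stand-ins for the HDA helpers, not their real implementations.

/* Whatever a patch routine attaches must be released in the codec's
 * .free callback; delegating straight to the generic free leaks it. */
#include <stdlib.h>

struct codec_sketch {
	void *beep;                  /* attached beep device, or NULL */
};

static void attach_beep(struct codec_sketch *c)
{
	c->beep = malloc(1);         /* snd_hda_attach_beep_device() */
}

static void generic_free(struct codec_sketch *c)
{
	(void)c;                     /* snd_hda_gen_free(): generic parser
	                              * state only, it knows nothing of
	                              * the beep device */
}

static void auto_free(struct codec_sketch *c)
{
	free(c->beep);               /* snd_hda_detach_beep_device() */
	c->beep = NULL;
	generic_free(c);             /* then the generic teardown */
}
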
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 638e7f738018..ca4739c3f650 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -715,8 +715,9 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
 	case UAC2_CLOCK_SELECTOR: {
 		struct uac_selector_unit_descriptor *d = p1;
 		/* call recursively to retrieve the channel info */
-		if (check_input_term(state, d->baSourceID[0], term) < 0)
-			return -ENODEV;
+		err = check_input_term(state, d->baSourceID[0], term);
+		if (err < 0)
+			return err;
 		term->type = d->bDescriptorSubtype << 16; /* virtual type */
 		term->id = id;
 		term->name = uac_selector_unit_iSelector(d);
@@ -725,7 +726,8 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
 	case UAC1_PROCESSING_UNIT:
 	case UAC1_EXTENSION_UNIT:
 	/* UAC2_PROCESSING_UNIT_V2 */
-	/* UAC2_EFFECT_UNIT */ {
+	/* UAC2_EFFECT_UNIT */
+	case UAC2_EXTENSION_UNIT_V2: {
 		struct uac_processing_unit_descriptor *d = p1;
 
 		if (state->mixer->protocol == UAC_VERSION_2 &&
@@ -1356,8 +1358,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
 		return err;
 
 	/* determine the input source type and name */
-	if (check_input_term(state, hdr->bSourceID, &iterm) < 0)
-		return -EINVAL;
+	err = check_input_term(state, hdr->bSourceID, &iterm);
+	if (err < 0)
+		return err;
 
 	master_bits = snd_usb_combine_bytes(bmaControls, csize);
 	/* master configuration quirks */
@@ -2052,6 +2055,8 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
 		return parse_audio_extension_unit(state, unitid, p1);
 	else /* UAC_VERSION_2 */
 		return parse_audio_processing_unit(state, unitid, p1);
+	case UAC2_EXTENSION_UNIT_V2:
+		return parse_audio_extension_unit(state, unitid, p1);
 	default:
 		snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]);
 		return -EINVAL;
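
The mixer.c hunks above and the two that follow work as a pair: inner parsers now propagate the callee's error code unchanged instead of collapsing it to -ENODEV or -EINVAL, and the top-level descriptor walk below treats -EINVAL, the code parse_audio_unit() returns for unit types it does not handle (including the newly recognized UAC2_EXTENSION_UNIT_V2), as skippable rather than fatal, so one odd unit no longer kills the whole mixer. A self-contained sketch of the convention, with parse_unit() as a hypothetical stand-in:

/* Inner parsers return a real errno; -EINVAL specifically means
 * "unsupported unit type", which the walk below tolerates. */
#include <errno.h>

static int parse_unit(int type)
{
	if (type == 0x42)
		return -EINVAL;      /* unit type we don't handle */
	if (type < 0)
		return -ENOMEM;      /* a genuine failure */
	return 0;
}

static int build_mixer(const int *units, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = parse_unit(units[i]);
		if (err < 0 && err != -EINVAL)
			return err;  /* real failure: abort the build */
		/* -EINVAL: skip this unit, keep the rest working */
	}
	return 0;
}
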
@@ -2118,7 +2123,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
 			state.oterm.type = le16_to_cpu(desc->wTerminalType);
 			state.oterm.name = desc->iTerminal;
 			err = parse_audio_unit(&state, desc->bSourceID);
-			if (err < 0)
+			if (err < 0 && err != -EINVAL)
 				return err;
 		} else { /* UAC_VERSION_2 */
 			struct uac2_output_terminal_descriptor *desc = p;
@@ -2130,12 +2135,12 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
 			state.oterm.type = le16_to_cpu(desc->wTerminalType);
 			state.oterm.name = desc->iTerminal;
 			err = parse_audio_unit(&state, desc->bSourceID);
-			if (err < 0)
+			if (err < 0 && err != -EINVAL)
 				return err;
 
 			/* for UAC2, use the same approach to also add the clock selectors */
 			err = parse_audio_unit(&state, desc->bCSourceID);
-			if (err < 0)
+			if (err < 0 && err != -EINVAL)
 				return err;
 		}
 	}
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index a20e32033431..0b0a90787db6 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -122,7 +122,7 @@ export Q VERBOSE
 
 EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)
 
-INCLUDES = -I. -I/usr/local/include $(CONFIG_INCLUDES)
+INCLUDES = -I. $(CONFIG_INCLUDES)
 
 # Set compile option CFLAGS if not set elsewhere
 CFLAGS ?= -g -Wall
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index a2108ca1cc17..bb74c79cd16e 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -95,7 +95,7 @@ ifeq ("$(origin DEBUG)", "command line")
   PERF_DEBUG = $(DEBUG)
 endif
 ifndef PERF_DEBUG
-  CFLAGS_OPTIMIZE = -O6 -D_FORTIFY_SOURCE=2
+  CFLAGS_OPTIMIZE = -O6
 endif
 
 ifdef PARSER_DEBUG
@@ -180,6 +180,12 @@ ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -Wvolatile-register-var,-W
   CFLAGS := $(CFLAGS) -Wvolatile-register-var
 endif
 
+ifndef PERF_DEBUG
+  ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -D_FORTIFY_SOURCE=2,-D_FORTIFY_SOURCE=2),y)
+    CFLAGS := $(CFLAGS) -D_FORTIFY_SOURCE=2
+  endif
+endif
+
### --- END CONFIGURATION SECTION ---
 
 ifeq ($(srctree),)
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index a5223e6a7b43..0fdc85269c4d 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -1,6 +1,30 @@
 #ifndef BENCH_H
 #define BENCH_H
 
+/*
+ * The madvise transparent hugepage constants were added in glibc
+ * 2.13. For compatibility with older versions of glibc, define these
+ * tokens if they are not already defined.
+ *
+ * PA-RISC uses different madvise values from other architectures and
+ * needs to be special-cased.
+ */
+#ifdef __hppa__
+# ifndef MADV_HUGEPAGE
+# define MADV_HUGEPAGE 67
+# endif
+# ifndef MADV_NOHUGEPAGE
+# define MADV_NOHUGEPAGE 68
+# endif
+#else
+# ifndef MADV_HUGEPAGE
+# define MADV_HUGEPAGE 14
+# endif
+# ifndef MADV_NOHUGEPAGE
+# define MADV_NOHUGEPAGE 15
+# endif
+#endif
+
 extern int bench_numa(int argc, const char **argv, const char *prefix);
 extern int bench_sched_messaging(int argc, const char **argv, const char *prefix);
 extern int bench_sched_pipe(int argc, const char **argv, const char *prefix);
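
The block added to bench.h back-fills MADV_HUGEPAGE/MADV_NOHUGEPAGE for glibc releases older than 2.13, with PA-RISC's divergent numbering special-cased. For reference, this is how a benchmark would consume the constants: a plain madvise() hint on an anonymous mapping, where failure is advisory rather than fatal. This is a standalone example, not code from perf.

#define _GNU_SOURCE          /* MAP_ANONYMOUS; assumes Linux + glibc */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;   /* one 2 MiB hugepage worth */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	/* A hint, not a demand: the kernel may ignore it, and on old
	 * glibc the constant comes from the fallback block above. */
	if (madvise(p, len, MADV_HUGEPAGE) != 0)
		perror("madvise");
	munmap(p, len);
	return 0;
}
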
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 774c90713a53..f1a939ebc19c 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -573,13 +573,15 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
 						 perf_event__synthesize_guest_os, tool);
 	}
 
-	if (!opts->target.system_wide)
+	if (perf_target__has_task(&opts->target))
 		err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
 							process_synthesized_event,
 							machine);
-	else
+	else if (perf_target__has_cpu(&opts->target))
 		err = perf_event__synthesize_threads(tool, process_synthesized_event,
 						     machine);
+	else /* command specified */
+		err = 0;
 
 	if (err != 0)
 		goto out_delete_session;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 38624686ee9a..226a4ae2f936 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -208,8 +208,9 @@ static inline int script_browse(const char *script_opt __maybe_unused)
 	return 0;
 }
 
-#define K_LEFT -1
-#define K_RIGHT -2
+#define K_LEFT -1000
+#define K_RIGHT -2000
+#define K_SWITCH_INPUT_DATA -3000
 #endif
 
 #ifdef GTK2_SUPPORT
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 55433aa42c8f..eabdce0a2daa 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -143,7 +143,7 @@ struct strlist *strlist__new(bool dupstr, const char *list)
 		slist->rblist.node_delete = strlist__node_delete;
 
 		slist->dupstr = dupstr;
-		if (slist && strlist__parse_list(slist, list) != 0)
+		if (list && strlist__parse_list(slist, list) != 0)
 			goto out_error;
 	}
 
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index ce82b9401958..5ba005c00e2f 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -74,9 +74,12 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 		u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
 		u64 redir_content;
 
-		ASSERT(redir_index < IOAPIC_NUM_PINS);
+		if (redir_index < IOAPIC_NUM_PINS)
+			redir_content =
+				ioapic->redirtbl[redir_index].bits;
+		else
+			redir_content = ~0ULL;
 
-		redir_content = ioapic->redirtbl[redir_index].bits;
 		result = (ioapic->ioregsel & 0x1) ?
 			(redir_content >> 32) & 0xffffffff :
 			redir_content & 0xffffffff;
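
The ioapic.c fix is the classic hardening move for guest-reachable state: ioregsel is guest-writable, so the derived redirection-table index must be bounds-checked at the point of use rather than merely ASSERT()ed (an assertion that is typically compiled out, leaving an out-of-bounds read). Out-of-range reads now yield all-ones, the conventional value for reads that hit nothing. A reduced stand-alone sketch of the same guard, not the KVM code itself:

/* Reduced model of ioapic_read_indirect(): the index comes from
 * guest-controlled ioregsel, so it is validated on every read. */
#include <stdint.h>

#define NUM_PINS 24                      /* IOAPIC_NUM_PINS */

static uint64_t redirtbl[NUM_PINS];

static uint32_t read_redir(uint32_t ioregsel)
{
	uint32_t index = (ioregsel - 0x10) >> 1;
	uint64_t content;

	content = (index < NUM_PINS) ? redirtbl[index] : ~0ULL;
	/* odd selector: high dword; even selector: low dword */
	return (ioregsel & 0x1) ? (uint32_t)(content >> 32)
				: (uint32_t)content;
}
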