aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.mailmap5
-rw-r--r--CREDITS7
-rw-r--r--Documentation/acpi/enumeration.txt6
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt6
-rw-r--r--Documentation/input/event-codes.txt13
-rw-r--r--Documentation/kernel-parameters.txt8
-rw-r--r--MAINTAINERS24
-rw-r--r--Makefile4
-rw-r--r--arch/arm/Kconfig5
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi4
-rw-r--r--arch/arm/boot/dts/hi3620.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts2
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi4
-rw-r--r--arch/arm/boot/dts/ste-nomadik-s8815.dts2
-rw-r--r--arch/arm/boot/dts/ste-nomadik-stn8815.dtsi7
-rw-r--r--arch/arm/crypto/aesbs-glue.c10
-rw-r--r--arch/arm/include/asm/mach/arch.h1
-rw-r--r--arch/arm/kernel/devtree.c8
-rw-r--r--arch/arm/kernel/iwmmxt.S23
-rw-r--r--arch/arm/kernel/kgdb.c4
-rw-r--r--arch/arm/kernel/topology.c2
-rw-r--r--arch/arm/mach-exynos/exynos.c10
-rw-r--r--arch/arm/mach-exynos/hotplug.c10
-rw-r--r--arch/arm/mach-exynos/platsmp.c34
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c4
-rw-r--r--arch/arm/mach-mvebu/coherency.c6
-rw-r--r--arch/arm/mach-mvebu/headsmp-a9.S9
-rw-r--r--arch/arm/mach-mvebu/pmsu.c10
-rw-r--r--arch/arm/mach-omap2/gpmc-nand.c18
-rw-r--r--arch/arm/mach-omap2/omap4-common.c4
-rw-r--r--arch/arm/mm/dma-mapping.c11
-rw-r--r--arch/arm/mm/idmap.c12
-rw-r--r--arch/arm/mm/mmu.c6
-rw-r--r--arch/arm/xen/grant-table.c5
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/crypto/aes-glue.c12
-rw-r--r--arch/arm64/kernel/efi-stub.c2
-rw-r--r--arch/arm64/mm/init.c17
-rw-r--r--arch/blackfin/configs/BF609-EZKIT_defconfig2
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S2
-rw-r--r--arch/blackfin/mach-bf533/boards/blackstamp.c1
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537e.c1
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537u.c1
-rw-r--r--arch/blackfin/mach-bf537/boards/tcm_bf537.c1
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c6
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c1
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c1
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c1
-rw-r--r--arch/blackfin/mach-bf609/boards/ezkit.c20
-rw-r--r--arch/blackfin/mach-bf609/include/mach/pm.h5
-rw-r--r--arch/blackfin/mach-bf609/pm.c4
-rw-r--r--arch/blackfin/mach-common/ints-priority.c2
-rw-r--r--arch/parisc/include/uapi/asm/signal.h2
-rw-r--r--arch/parisc/mm/init.c1
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/include/asm/cputable.h1
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h19
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h3
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h2
-rw-r--r--arch/powerpc/kernel/cputable.c20
-rw-r--r--arch/powerpc/kernel/rtas_flash.c6
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c7
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S2
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S4
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S6
-rw-r--r--arch/powerpc/kvm/book3s_rtas.c65
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c3
-rw-r--r--arch/powerpc/lib/mem_64.S2
-rw-r--r--arch/powerpc/lib/sstep.c10
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c10
-rw-r--r--arch/powerpc/perf/core-book3s.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal-elog.c4
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c1
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c1
-rw-r--r--arch/s390/include/asm/switch_to.h4
-rw-r--r--arch/s390/kernel/head.S6
-rw-r--r--arch/s390/kernel/ptrace.c12
-rw-r--r--arch/s390/pci/pci.c49
-rw-r--r--arch/sh/Makefile3
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/uapi/asm/unistd.h3
-rw-r--r--arch/sparc/kernel/sys32.S1
-rw-r--r--arch/sparc/kernel/systbls_32.S1
-rw-r--r--arch/sparc/kernel/systbls_64.S2
-rw-r--r--arch/um/kernel/tlb.c9
-rw-r--r--arch/um/kernel/trap.c2
-rw-r--r--arch/um/os-Linux/skas/process.c9
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/boot/header.S26
-rw-r--r--arch/x86/boot/tools/build.c38
-rw-r--r--arch/x86/include/asm/irqflags.h2
-rw-r--r--arch/x86/kernel/apm_32.c1
-rw-r--r--arch/x86/kernel/cpu/intel.c22
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c12
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c10
-rw-r--r--arch/x86/kernel/cpu/perf_event.c3
-rw-r--r--arch/x86/kernel/cpu/perf_event.h12
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c78
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c11
-rw-r--r--arch/x86/kernel/entry_32.S9
-rw-r--r--arch/x86/kernel/entry_64.S28
-rw-r--r--arch/x86/kernel/espfix_64.c5
-rw-r--r--arch/x86/kernel/kprobes/core.c3
-rw-r--r--arch/x86/kernel/paravirt_patch_64.c2
-rw-r--r--arch/x86/kernel/tsc.c4
-rw-r--r--arch/x86/kvm/x86.c12
-rw-r--r--arch/x86/xen/grant-table.c148
-rw-r--r--arch/xtensa/kernel/vectors.S158
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S4
-rw-r--r--arch/xtensa/mm/init.c2
-rw-r--r--block/blk-cgroup.c7
-rw-r--r--block/blk-tag.c33
-rw-r--r--block/compat_ioctl.c1
-rw-r--r--crypto/af_alg.c2
-rw-r--r--drivers/acpi/video.c10
-rw-r--r--drivers/ata/ahci.c1
-rw-r--r--drivers/ata/libata-core.c12
-rw-r--r--drivers/ata/libata-eh.c9
-rw-r--r--drivers/ata/pata_ep93xx.c2
-rw-r--r--drivers/base/platform.c18
-rw-r--r--drivers/block/drbd/drbd_nl.c6
-rw-r--r--drivers/block/zram/zram_drv.c22
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/bluetooth/hci_h5.c1
-rw-r--r--drivers/char/hw_random/core.c47
-rw-r--r--drivers/char/hw_random/virtio-rng.c10
-rw-r--r--drivers/char/random.c17
-rw-r--r--drivers/clk/ti/clk-7xx.c7
-rw-r--r--drivers/cpufreq/Kconfig.arm3
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c7
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cpufreq/sa1110-cpufreq.c2
-rw-r--r--drivers/firewire/Kconfig1
-rw-r--r--drivers/firewire/ohci.c4
-rw-r--r--drivers/firmware/efi/efi.c22
-rw-r--r--drivers/firmware/efi/fdt.c10
-rw-r--r--drivers/gpio/gpio-mcp23s08.c6
-rw-r--r--drivers/gpio/gpio-rcar.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c25
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c4
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c11
-rw-r--r--drivers/gpu/drm/i915/intel_display.c4
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c7
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c10
-rw-r--r--drivers/gpu/drm/radeon/cik.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c6
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/r600.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h18
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c198
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c87
-rw-r--r--drivers/gpu/drm/radeon/rv515.c5
-rw-r--r--drivers/gpu/drm/radeon/si.c1
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c15
-rw-r--r--drivers/hv/hv_fcopy.c2
-rw-r--r--drivers/hwmon/adt7470.c6
-rw-r--r--drivers/hwmon/da9052-hwmon.c2
-rw-r--r--drivers/hwmon/da9055-hwmon.c2
-rw-r--r--drivers/hwmon/smsc47m192.c4
-rw-r--r--drivers/ide/Kconfig5
-rw-r--r--drivers/ide/ide-probe.c8
-rw-r--r--drivers/iio/accel/bma180.c8
-rw-r--r--drivers/iio/accel/mma8452.c8
-rw-r--r--drivers/iio/industrialio-buffer.c2
-rw-r--r--drivers/iio/industrialio-event.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c14
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c18
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c2
-rw-r--r--drivers/input/input.c6
-rw-r--r--drivers/input/keyboard/st-keyscan.c2
-rw-r--r--drivers/input/misc/sirfsoc-onkey.c2
-rw-r--r--drivers/input/mouse/synaptics.c5
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/tablet/wacom_wac.c28
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c5
-rw-r--r--drivers/iommu/fsl_pamu.c8
-rw-r--r--drivers/iommu/fsl_pamu_domain.c18
-rw-r--r--drivers/irqchip/irq-gic.c7
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c1
-rw-r--r--drivers/isdn/hisax/l3ni1.c14
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c28
-rw-r--r--drivers/md/dm-bufio.c2
-rw-r--r--drivers/md/dm-cache-metadata.c9
-rw-r--r--drivers/md/dm-cache-target.c13
-rw-r--r--drivers/md/dm-thin-metadata.c9
-rw-r--r--drivers/media/dvb-frontends/si2168.c16
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h2
-rw-r--r--drivers/media/dvb-frontends/tda10071.c12
-rw-r--r--drivers/media/dvb-frontends/tda10071_priv.h1
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c2
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c1
-rw-r--r--drivers/media/platform/davinci/vpif_display.c1
-rw-r--r--drivers/media/tuners/si2157.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c40
-rw-r--r--drivers/media/usb/gspca/pac7302.c1
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c4
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c43
-rw-r--r--drivers/mtd/devices/elm.c2
-rw-r--r--drivers/mtd/nand/nand_base.c6
-rw-r--r--drivers/mtd/ubi/fastmap.c4
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c3
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c43
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c21
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c7
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h18
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c66
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h12
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c16
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c19
-rw-r--r--drivers/net/ethernet/realtek/r8169.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c42
-rw-r--r--drivers/net/fddi/defxx.c17
-rw-r--r--drivers/net/hyperv/netvsc.c4
-rw-r--r--drivers/net/phy/dp83640.c6
-rw-r--r--drivers/net/phy/mdio_bus.c45
-rw-r--r--drivers/net/phy/phy_device.c15
-rw-r--r--drivers/net/ppp/ppp_generic.c30
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/usb/cdc_ether.c16
-rw-r--r--drivers/net/usb/hso.c50
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c3
-rw-r--r--drivers/net/usb/qmi_wwan.c3
-rw-r--r--drivers/net/usb/r8152.c14
-rw-r--r--drivers/net/usb/smsc95xx.c14
-rw-r--r--drivers/net/vxlan.c2
-rw-r--r--drivers/net/wan/farsync.c112
-rw-r--r--drivers/net/wan/x25_asy.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c65
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c1
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c1
-rw-r--r--drivers/net/wireless/mwifiex/main.c1
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c1
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c2
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c1
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c28
-rw-r--r--drivers/net/xen-netback/netback.c86
-rw-r--r--drivers/net/xen-netfront.c27
-rw-r--r--drivers/of/fdt.c66
-rw-r--r--drivers/of/of_mdio.c34
-rw-r--r--drivers/parport/Kconfig12
-rw-r--r--drivers/pinctrl/pinctrl-st.c2
-rw-r--r--drivers/pnp/pnpacpi/core.c3
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c8
-rw-r--r--drivers/s390/char/raw3270.c1
-rw-r--r--drivers/s390/crypto/ap_bus.c9
-rw-r--r--drivers/scsi/scsi_lib.c8
-rw-r--r--drivers/staging/media/omap4iss/Kconfig2
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_intf.c4
-rw-r--r--drivers/staging/vt6655/bssdb.c2
-rw-r--r--drivers/staging/vt6655/device_main.c7
-rw-r--r--drivers/usb/chipidea/udc.c4
-rw-r--r--drivers/usb/core/hub.c19
-rw-r--r--drivers/xen/balloon.c12
-rw-r--r--drivers/xen/grant-table.c9
-rw-r--r--drivers/xen/manage.c5
-rw-r--r--fs/afs/main.c4
-rw-r--r--fs/aio.c7
-rw-r--r--fs/btrfs/ordered-data.c11
-rw-r--r--fs/btrfs/volumes.c8
-rw-r--r--fs/coredump.c2
-rw-r--r--fs/direct-io.c23
-rw-r--r--fs/fuse/dev.c51
-rw-r--r--fs/fuse/dir.c41
-rw-r--r--fs/fuse/file.c8
-rw-r--r--fs/fuse/inode.c27
-rw-r--r--fs/gfs2/file.c4
-rw-r--r--fs/gfs2/glock.c14
-rw-r--r--fs/gfs2/glops.c4
-rw-r--r--fs/gfs2/lock_dlm.c4
-rw-r--r--fs/gfs2/rgrp.c4
-rw-r--r--fs/namei.c3
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/internal.h1
-rw-r--r--fs/nfs/nfs3acl.c43
-rw-r--r--fs/nfs/nfs3proc.c4
-rw-r--r--fs/nfs/pagelist.c20
-rw-r--r--fs/nfs/write.c335
-rw-r--r--fs/nfsd/nfs4xdr.c4
-rw-r--r--fs/open.c5
-rw-r--r--fs/quota/dquot.c2
-rw-r--r--fs/xattr.c2
-rw-r--r--fs/xfs/xfs_bmap.c7
-rw-r--r--fs/xfs/xfs_bmap.h4
-rw-r--r--fs/xfs/xfs_bmap_util.c53
-rw-r--r--fs/xfs/xfs_bmap_util.h4
-rw-r--r--fs/xfs/xfs_btree.c82
-rw-r--r--fs/xfs/xfs_iomap.c3
-rw-r--r--fs/xfs/xfs_sb.c25
-rw-r--r--include/dt-bindings/pinctrl/dra.h7
-rw-r--r--include/linux/cpufreq.h4
-rw-r--r--include/linux/hugetlb.h1
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/mlx4/device.h4
-rw-r--r--include/linux/mutex.h4
-rw-r--r--include/linux/of_fdt.h3
-rw-r--r--include/linux/of_mdio.h8
-rw-r--r--include/linux/osq_lock.h27
-rw-r--r--include/linux/pagemap.h12
-rw-r--r--include/linux/rcupdate.h46
-rw-r--r--include/linux/rwsem-spinlock.h8
-rw-r--r--include/linux/rwsem.h34
-rw-r--r--include/linux/sched.h8
-rw-r--r--include/net/ip.h11
-rw-r--r--include/net/neighbour.h1
-rw-r--r--include/net/netfilter/nf_tables.h6
-rw-r--r--include/net/netns/ieee802154_6lowpan.h2
-rw-r--r--include/net/netns/nftables.h2
-rw-r--r--include/net/sock.h12
-rw-r--r--include/uapi/linux/fuse.h3
-rw-r--r--include/xen/grant_table.h1
-rw-r--r--kernel/Kconfig.locks9
-rw-r--r--kernel/events/core.c34
-rw-r--r--kernel/kexec.c4
-rw-r--r--kernel/kprobes.c14
-rw-r--r--kernel/locking/mcs_spinlock.c64
-rw-r--r--kernel/locking/mcs_spinlock.h9
-rw-r--r--kernel/locking/mutex.c2
-rw-r--r--kernel/locking/rwsem-spinlock.c28
-rw-r--r--kernel/locking/rwsem-xadd.c16
-rw-r--r--kernel/locking/rwsem.c2
-rw-r--r--kernel/power/process.c1
-rw-r--r--kernel/power/suspend.c4
-rw-r--r--kernel/rcu/rcutorture.c4
-rw-r--r--kernel/rcu/tree.c140
-rw-r--r--kernel/rcu/tree.h6
-rw-r--r--kernel/rcu/tree_plugin.h2
-rw-r--r--kernel/rcu/update.c22
-rw-r--r--kernel/sched/core.c7
-rw-r--r--kernel/sched/debug.c2
-rw-r--r--kernel/time/alarmtimer.c20
-rw-r--r--kernel/time/clockevents.c10
-rw-r--r--kernel/time/sched_clock.c4
-rw-r--r--kernel/trace/ftrace.c4
-rw-r--r--kernel/trace/ring_buffer.c4
-rw-r--r--kernel/trace/trace.c20
-rw-r--r--kernel/trace/trace_clock.c9
-rw-r--r--kernel/trace/trace_events.c1
-rw-r--r--lib/cpumask.c2
-rw-r--r--mm/filemap.c13
-rw-r--r--mm/hugetlb.c3
-rw-r--r--mm/memcontrol.c4
-rw-r--r--mm/memory-failure.c18
-rw-r--r--mm/memory.c24
-rw-r--r--mm/migrate.c5
-rw-r--r--mm/page-writeback.c6
-rw-r--r--mm/page_alloc.c31
-rw-r--r--mm/rmap.c10
-rw-r--r--mm/shmem.c102
-rw-r--r--mm/slab_common.c2
-rw-r--r--mm/truncate.c11
-rw-r--r--net/8021q/vlan_dev.c13
-rw-r--r--net/appletalk/ddp.c3
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c44
-rw-r--r--net/batman-adv/soft-interface.c60
-rw-r--r--net/batman-adv/translation-table.c26
-rw-r--r--net/batman-adv/types.h2
-rw-r--r--net/bluetooth/hci_conn.c12
-rw-r--r--net/bluetooth/smp.c60
-rw-r--r--net/compat.c9
-rw-r--r--net/core/dev.c32
-rw-r--r--net/core/iovec.c6
-rw-r--r--net/core/neighbour.c11
-rw-r--r--net/dns_resolver/dns_query.c2
-rw-r--r--net/ipv4/af_inet.c3
-rw-r--r--net/ipv4/gre_demux.c1
-rw-r--r--net/ipv4/gre_offload.c3
-rw-r--r--net/ipv4/icmp.c2
-rw-r--r--net/ipv4/igmp.c10
-rw-r--r--net/ipv4/ip_options.c4
-rw-r--r--net/ipv4/ip_tunnel.c12
-rw-r--r--net/ipv4/route.c47
-rw-r--r--net/ipv4/tcp.c3
-rw-r--r--net/ipv4/tcp_input.c8
-rw-r--r--net/ipv4/tcp_offload.c2
-rw-r--r--net/ipv4/tcp_output.c6
-rw-r--r--net/ipv4/udp.c5
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/mcast.c13
-rw-r--r--net/ipv6/tcpv6_offload.c2
-rw-r--r--net/ipv6/udp.c6
-rw-r--r--net/l2tp/l2tp_ppp.c4
-rw-r--r--net/mac80211/cfg.c5
-rw-r--r--net/mac80211/tx.c20
-rw-r--r--net/mac80211/util.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c1
-rw-r--r--net/netfilter/nf_tables_api.c140
-rw-r--r--net/netfilter/nf_tables_core.c10
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/openvswitch/actions.c2
-rw-r--r--net/openvswitch/datapath.c27
-rw-r--r--net/openvswitch/flow.c4
-rw-r--r--net/openvswitch/flow.h5
-rw-r--r--net/openvswitch/flow_table.c16
-rw-r--r--net/openvswitch/flow_table.h3
-rw-r--r--net/openvswitch/vport-gre.c17
-rw-r--r--net/sched/cls_u32.c19
-rw-r--r--net/sctp/associola.c1
-rw-r--r--net/sctp/ulpevent.c122
-rw-r--r--net/tipc/bcast.c1
-rw-r--r--net/tipc/msg.c11
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/nl80211.c11
-rw-r--r--net/wireless/reg.c22
-rw-r--r--net/wireless/trace.h3
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_user.c7
-rw-r--r--sound/firewire/bebob/bebob_maudio.c53
-rw-r--r--sound/pci/hda/hda_controller.c3
-rw-r--r--sound/pci/hda/hda_intel.c12
-rw-r--r--sound/pci/hda/hda_priv.h1
-rw-r--r--sound/pci/hda/hda_tegra.c2
-rw-r--r--sound/pci/hda/patch_hdmi.c2
-rw-r--r--tools/lib/lockdep/include/liblockdep/mutex.h4
-rw-r--r--tools/lib/lockdep/include/liblockdep/rwlock.h8
-rw-r--r--tools/lib/lockdep/preload.c20
-rw-r--r--tools/perf/ui/browsers/hists.c21
-rw-r--r--tools/perf/util/machine.c54
-rw-r--r--virt/kvm/arm/vgic.c24
467 files changed, 4180 insertions, 2301 deletions
diff --git a/.mailmap b/.mailmap
index df1baba43a64..1ad68731fb47 100644
--- a/.mailmap
+++ b/.mailmap
@@ -62,6 +62,11 @@ Jeff Garzik <jgarzik@pretzel.yyz.us>
62Jens Axboe <axboe@suse.de> 62Jens Axboe <axboe@suse.de>
63Jens Osterkamp <Jens.Osterkamp@de.ibm.com> 63Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
64John Stultz <johnstul@us.ibm.com> 64John Stultz <johnstul@us.ibm.com>
65<josh@joshtriplett.org> <josh@freedesktop.org>
66<josh@joshtriplett.org> <josh@kernel.org>
67<josh@joshtriplett.org> <josht@linux.vnet.ibm.com>
68<josh@joshtriplett.org> <josht@us.ibm.com>
69<josh@joshtriplett.org> <josht@vnet.ibm.com>
65Juha Yrjola <at solidboot.com> 70Juha Yrjola <at solidboot.com>
66Juha Yrjola <juha.yrjola@nokia.com> 71Juha Yrjola <juha.yrjola@nokia.com>
67Juha Yrjola <juha.yrjola@solidboot.com> 72Juha Yrjola <juha.yrjola@solidboot.com>
diff --git a/CREDITS b/CREDITS
index 28ee1514b9de..a80b66718f66 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3511,10 +3511,11 @@ S: MacGregor A.C.T 2615
3511S: Australia 3511S: Australia
3512 3512
3513N: Josh Triplett 3513N: Josh Triplett
3514E: josh@freedesktop.org 3514E: josh@joshtriplett.org
3515P: 1024D/D0FE7AFB B24A 65C9 1D71 2AC2 DE87 CA26 189B 9946 D0FE 7AFB 3515P: 4096R/8AFF873D 758E 5042 E397 4BA3 3A9C 1E67 0ED9 A3DF 8AFF 873D
3516D: rcutorture maintainer 3516D: RCU and rcutorture
3517D: lock annotations, finding and fixing lock bugs 3517D: lock annotations, finding and fixing lock bugs
3518D: kernel tinification
3518 3519
3519N: Winfried Trümper 3520N: Winfried Trümper
3520E: winni@xpilot.org 3521E: winni@xpilot.org
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index fd786ea13a1f..e182be5e3c83 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -60,12 +60,6 @@ If the driver needs to perform more complex initialization like getting and
60configuring GPIOs it can get its ACPI handle and extract this information 60configuring GPIOs it can get its ACPI handle and extract this information
61from ACPI tables. 61from ACPI tables.
62 62
63Currently the kernel is not able to automatically determine from which ACPI
64device it should make the corresponding platform device so we need to add
65the ACPI device explicitly to acpi_platform_device_ids list defined in
66drivers/acpi/acpi_platform.c. This limitation is only for the platform
67devices, SPI and I2C devices are created automatically as described below.
68
69DMA support 63DMA support
70~~~~~~~~~~~ 64~~~~~~~~~~~
71DMA controllers enumerated via ACPI should be registered in the system to 65DMA controllers enumerated via ACPI should be registered in the system to
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
index f055515d2b62..366690cb86a3 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
@@ -8,10 +8,12 @@ Both required and optional properties listed below must be defined
8under node /cpus/cpu@0. 8under node /cpus/cpu@0.
9 9
10Required properties: 10Required properties:
11- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt 11- None
12 for details
13 12
14Optional properties: 13Optional properties:
14- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt for
15 details. OPPs *must* be supplied either via DT, i.e. this property, or
16 populated at runtime.
15- clock-latency: Specify the possible maximum transition latency for clock, 17- clock-latency: Specify the possible maximum transition latency for clock,
16 in unit of nanoseconds. 18 in unit of nanoseconds.
17- voltage-tolerance: Specify the CPU voltage tolerance in percentage. 19- voltage-tolerance: Specify the CPU voltage tolerance in percentage.
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index f1ea2c69648d..c587a966413e 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -281,6 +281,19 @@ gestures can normally be extracted from it.
281If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT 281If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
282device. 282device.
283 283
284INPUT_PROP_TOPBUTTONPAD:
285-----------------------
286Some laptops, most notably the Lenovo *40 series provide a trackstick
287device but do not have physical buttons associated with the trackstick
288device. Instead, the top area of the touchpad is marked to show
289visual/haptic areas for left, middle, right buttons intended to be used
290with the trackstick.
291
292If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
293accordingly. This property does not affect kernel behavior.
294The kernel does not provide button emulation for such devices but treats
295them as any other INPUT_PROP_BUTTONPAD device.
296
284Guidelines: 297Guidelines:
285========== 298==========
286The guidelines below ensure proper single-touch and multi-finger functionality. 299The guidelines below ensure proper single-touch and multi-finger functionality.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 90c12c591168..0cd50491c0a4 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2812,6 +2812,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
2812 leaf rcu_node structure. Useful for very large 2812 leaf rcu_node structure. Useful for very large
2813 systems. 2813 systems.
2814 2814
2815 rcutree.jiffies_till_sched_qs= [KNL]
2816 Set required age in jiffies for a
2817 given grace period before RCU starts
2818 soliciting quiescent-state help from
2819 rcu_note_context_switch().
2820
2815 rcutree.jiffies_till_first_fqs= [KNL] 2821 rcutree.jiffies_till_first_fqs= [KNL]
2816 Set delay from grace-period initialization to 2822 Set delay from grace-period initialization to
2817 first attempt to force quiescent states. 2823 first attempt to force quiescent states.
@@ -3548,7 +3554,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
3548 the allocated input device; If set to 0, video driver 3554 the allocated input device; If set to 0, video driver
3549 will only send out the event without touching backlight 3555 will only send out the event without touching backlight
3550 brightness level. 3556 brightness level.
3551 default: 0 3557 default: 1
3552 3558
3553 virtio_mmio.device= 3559 virtio_mmio.device=
3554 [VMMIO] Memory mapped virtio (platform) device. 3560 [VMMIO] Memory mapped virtio (platform) device.
diff --git a/MAINTAINERS b/MAINTAINERS
index 992335ca0b2c..ffd4ace8ade8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -156,7 +156,6 @@ F: drivers/net/hamradio/6pack.c
156 156
1578169 10/100/1000 GIGABIT ETHERNET DRIVER 1578169 10/100/1000 GIGABIT ETHERNET DRIVER
158M: Realtek linux nic maintainers <nic_swsd@realtek.com> 158M: Realtek linux nic maintainers <nic_swsd@realtek.com>
159M: Francois Romieu <romieu@fr.zoreil.com>
160L: netdev@vger.kernel.org 159L: netdev@vger.kernel.org
161S: Maintained 160S: Maintained
162F: drivers/net/ethernet/realtek/r8169.c 161F: drivers/net/ethernet/realtek/r8169.c
@@ -4511,8 +4510,7 @@ S: Supported
4511F: drivers/idle/i7300_idle.c 4510F: drivers/idle/i7300_idle.c
4512 4511
4513IEEE 802.15.4 SUBSYSTEM 4512IEEE 802.15.4 SUBSYSTEM
4514M: Alexander Smirnov <alex.bluesman.smirnov@gmail.com> 4513M: Alexander Aring <alex.aring@gmail.com>
4515M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
4516L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) 4514L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
4517W: http://apps.sourceforge.net/trac/linux-zigbee 4515W: http://apps.sourceforge.net/trac/linux-zigbee
4518T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git 4516T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
@@ -6958,6 +6956,12 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6958S: Maintained 6956S: Maintained
6959F: drivers/pinctrl/pinctrl-at91.c 6957F: drivers/pinctrl/pinctrl-at91.c
6960 6958
6959PIN CONTROLLER - RENESAS
6960M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
6961L: linux-sh@vger.kernel.org
6962S: Maintained
6963F: drivers/pinctrl/sh-pfc/
6964
6961PIN CONTROLLER - SAMSUNG 6965PIN CONTROLLER - SAMSUNG
6962M: Tomasz Figa <t.figa@samsung.com> 6966M: Tomasz Figa <t.figa@samsung.com>
6963M: Thomas Abraham <thomas.abraham@linaro.org> 6967M: Thomas Abraham <thomas.abraham@linaro.org>
@@ -7420,7 +7424,7 @@ S: Orphan
7420F: drivers/net/wireless/ray* 7424F: drivers/net/wireless/ray*
7421 7425
7422RCUTORTURE MODULE 7426RCUTORTURE MODULE
7423M: Josh Triplett <josh@freedesktop.org> 7427M: Josh Triplett <josh@joshtriplett.org>
7424M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> 7428M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
7425L: linux-kernel@vger.kernel.org 7429L: linux-kernel@vger.kernel.org
7426S: Supported 7430S: Supported
@@ -8033,6 +8037,16 @@ F: drivers/ata/
8033F: include/linux/ata.h 8037F: include/linux/ata.h
8034F: include/linux/libata.h 8038F: include/linux/libata.h
8035 8039
8040SERIAL ATA AHCI PLATFORM devices support
8041M: Hans de Goede <hdegoede@redhat.com>
8042M: Tejun Heo <tj@kernel.org>
8043L: linux-ide@vger.kernel.org
8044T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
8045S: Supported
8046F: drivers/ata/ahci_platform.c
8047F: drivers/ata/libahci_platform.c
8048F: include/linux/ahci_platform.h
8049
8036SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER 8050SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
8037M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com> 8051M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
8038L: linux-scsi@vger.kernel.org 8052L: linux-scsi@vger.kernel.org
@@ -8920,7 +8934,7 @@ M: Stephen Warren <swarren@wwwdotorg.org>
8920M: Thierry Reding <thierry.reding@gmail.com> 8934M: Thierry Reding <thierry.reding@gmail.com>
8921L: linux-tegra@vger.kernel.org 8935L: linux-tegra@vger.kernel.org
8922Q: http://patchwork.ozlabs.org/project/linux-tegra/list/ 8936Q: http://patchwork.ozlabs.org/project/linux-tegra/list/
8923T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git 8937T: git git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux.git
8924S: Supported 8938S: Supported
8925N: [^a-z]tegra 8939N: [^a-z]tegra
8926 8940
diff --git a/Makefile b/Makefile
index f3c543df4697..d0901b46b4bf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 16 2PATCHLEVEL = 16
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc5 4EXTRAVERSION =
5NAME = Shuffling Zombie Juror 5NAME = Shuffling Zombie Juror
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -688,6 +688,8 @@ KBUILD_CFLAGS += -fomit-frame-pointer
688endif 688endif
689endif 689endif
690 690
691KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
692
691ifdef CONFIG_DEBUG_INFO 693ifdef CONFIG_DEBUG_INFO
692KBUILD_CFLAGS += -g 694KBUILD_CFLAGS += -g
693KBUILD_AFLAGS += -Wa,-gdwarf-2 695KBUILD_AFLAGS += -Wa,-gdwarf-2
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 245058b3b0ef..290f02ee0157 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,6 +6,7 @@ config ARM
6 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 6 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
7 select ARCH_HAVE_CUSTOM_GPIO_H 7 select ARCH_HAVE_CUSTOM_GPIO_H
8 select ARCH_MIGHT_HAVE_PC_PARPORT 8 select ARCH_MIGHT_HAVE_PC_PARPORT
9 select ARCH_SUPPORTS_ATOMIC_RMW
9 select ARCH_USE_BUILTIN_BSWAP 10 select ARCH_USE_BUILTIN_BSWAP
10 select ARCH_USE_CMPXCHG_LOCKREF 11 select ARCH_USE_CMPXCHG_LOCKREF
11 select ARCH_WANT_IPC_PARSE_VERSION 12 select ARCH_WANT_IPC_PARSE_VERSION
@@ -312,7 +313,7 @@ config ARCH_MULTIPLATFORM
312config ARCH_INTEGRATOR 313config ARCH_INTEGRATOR
313 bool "ARM Ltd. Integrator family" 314 bool "ARM Ltd. Integrator family"
314 select ARM_AMBA 315 select ARM_AMBA
315 select ARM_PATCH_PHYS_VIRT 316 select ARM_PATCH_PHYS_VIRT if MMU
316 select AUTO_ZRELADDR 317 select AUTO_ZRELADDR
317 select COMMON_CLK 318 select COMMON_CLK
318 select COMMON_CLK_VERSATILE 319 select COMMON_CLK_VERSATILE
@@ -658,7 +659,7 @@ config ARCH_MSM
658config ARCH_SHMOBILE_LEGACY 659config ARCH_SHMOBILE_LEGACY
659 bool "Renesas ARM SoCs (non-multiplatform)" 660 bool "Renesas ARM SoCs (non-multiplatform)"
660 select ARCH_SHMOBILE 661 select ARCH_SHMOBILE
661 select ARM_PATCH_PHYS_VIRT 662 select ARM_PATCH_PHYS_VIRT if MMU
662 select CLKDEV_LOOKUP 663 select CLKDEV_LOOKUP
663 select GENERIC_CLOCKEVENTS 664 select GENERIC_CLOCKEVENTS
664 select HAVE_ARM_SCU if SMP 665 select HAVE_ARM_SCU if SMP
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 287795985e32..b84bac5bada4 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -925,7 +925,7 @@
925 compatible = "atmel,at91rm9200-ohci", "usb-ohci"; 925 compatible = "atmel,at91rm9200-ohci", "usb-ohci";
926 reg = <0x00500000 0x00100000>; 926 reg = <0x00500000 0x00100000>;
927 interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; 927 interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
928 clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>, 928 clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
929 <&uhpck>; 929 <&uhpck>;
930 clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck"; 930 clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
931 status = "disabled"; 931 status = "disabled";
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 2ebc42140ea6..2c0d6ea3ab41 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -1124,6 +1124,7 @@
1124 compatible = "atmel,at91sam9rl-pwm"; 1124 compatible = "atmel,at91sam9rl-pwm";
1125 reg = <0xf8034000 0x300>; 1125 reg = <0xf8034000 0x300>;
1126 interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>; 1126 interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>;
1127 clocks = <&pwm_clk>;
1127 #pwm-cells = <3>; 1128 #pwm-cells = <3>;
1128 status = "disabled"; 1129 status = "disabled";
1129 }; 1130 };
@@ -1155,8 +1156,7 @@
1155 compatible = "atmel,at91rm9200-ohci", "usb-ohci"; 1156 compatible = "atmel,at91rm9200-ohci", "usb-ohci";
1156 reg = <0x00600000 0x100000>; 1157 reg = <0x00600000 0x100000>;
1157 interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; 1158 interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
1158 clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>, 1159 clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
1159 <&uhpck>;
1160 clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck"; 1160 clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
1161 status = "disabled"; 1161 status = "disabled";
1162 }; 1162 };
diff --git a/arch/arm/boot/dts/hi3620.dtsi b/arch/arm/boot/dts/hi3620.dtsi
index ab1116d086be..83a5b8685bd9 100644
--- a/arch/arm/boot/dts/hi3620.dtsi
+++ b/arch/arm/boot/dts/hi3620.dtsi
@@ -73,7 +73,7 @@
73 73
74 L2: l2-cache { 74 L2: l2-cache {
75 compatible = "arm,pl310-cache"; 75 compatible = "arm,pl310-cache";
76 reg = <0xfc10000 0x100000>; 76 reg = <0x100000 0x100000>;
77 interrupts = <0 15 4>; 77 interrupts = <0 15 4>;
78 cache-unified; 78 cache-unified;
79 cache-level = <2>; 79 cache-level = <2>;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 1fe45d1f75ec..b15f1a77d684 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -353,7 +353,7 @@
353 }; 353 };
354 354
355 twl_power: power { 355 twl_power: power {
356 compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off"; 356 compatible = "ti,twl4030-power-n900";
357 ti,use_poweroff; 357 ti,use_poweroff;
358 }; 358 };
359}; 359};
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 8d7ffaeff6e0..79f68acfd5d4 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -540,9 +540,9 @@
540 #clock-cells = <0>; 540 #clock-cells = <0>;
541 clock-output-names = "sd1"; 541 clock-output-names = "sd1";
542 }; 542 };
543 sd2_clk: sd3_clk@e615007c { 543 sd2_clk: sd3_clk@e615026c {
544 compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock"; 544 compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
545 reg = <0 0xe615007c 0 4>; 545 reg = <0 0xe615026c 0 4>;
546 clocks = <&pll1_div2_clk>; 546 clocks = <&pll1_div2_clk>;
547 #clock-cells = <0>; 547 #clock-cells = <0>;
548 clock-output-names = "sd2"; 548 clock-output-names = "sd2";
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index f557feb997f4..90d8b6c7a205 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -4,7 +4,7 @@
4 */ 4 */
5 5
6/dts-v1/; 6/dts-v1/;
7/include/ "ste-nomadik-stn8815.dtsi" 7#include "ste-nomadik-stn8815.dtsi"
8 8
9/ { 9/ {
10 model = "Calao Systems USB-S8815"; 10 model = "Calao Systems USB-S8815";
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index d316c955bd5f..dbcf521b017f 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -1,7 +1,9 @@
1/* 1/*
2 * Device Tree for the ST-Ericsson Nomadik 8815 STn8815 SoC 2 * Device Tree for the ST-Ericsson Nomadik 8815 STn8815 SoC
3 */ 3 */
4/include/ "skeleton.dtsi" 4
5#include <dt-bindings/gpio/gpio.h>
6#include "skeleton.dtsi"
5 7
6/ { 8/ {
7 #address-cells = <1>; 9 #address-cells = <1>;
@@ -842,8 +844,7 @@
842 bus-width = <4>; 844 bus-width = <4>;
843 cap-mmc-highspeed; 845 cap-mmc-highspeed;
844 cap-sd-highspeed; 846 cap-sd-highspeed;
845 cd-gpios = <&gpio3 15 0x1>; 847 cd-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>;
846 cd-inverted;
847 pinctrl-names = "default"; 848 pinctrl-names = "default";
848 pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>; 849 pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
849 vmmc-supply = <&vmmc_regulator>; 850 vmmc-supply = <&vmmc_regulator>;
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 4522366da759..15468fbbdea3 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -137,7 +137,7 @@ static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
137 dst += AES_BLOCK_SIZE; 137 dst += AES_BLOCK_SIZE;
138 } while (--blocks); 138 } while (--blocks);
139 } 139 }
140 err = blkcipher_walk_done(desc, &walk, 0); 140 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
141 } 141 }
142 return err; 142 return err;
143} 143}
@@ -158,7 +158,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
158 bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, 158 bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
159 walk.nbytes, &ctx->dec, walk.iv); 159 walk.nbytes, &ctx->dec, walk.iv);
160 kernel_neon_end(); 160 kernel_neon_end();
161 err = blkcipher_walk_done(desc, &walk, 0); 161 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
162 } 162 }
163 while (walk.nbytes) { 163 while (walk.nbytes) {
164 u32 blocks = walk.nbytes / AES_BLOCK_SIZE; 164 u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -182,7 +182,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
182 dst += AES_BLOCK_SIZE; 182 dst += AES_BLOCK_SIZE;
183 src += AES_BLOCK_SIZE; 183 src += AES_BLOCK_SIZE;
184 } while (--blocks); 184 } while (--blocks);
185 err = blkcipher_walk_done(desc, &walk, 0); 185 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
186 } 186 }
187 return err; 187 return err;
188} 188}
@@ -268,7 +268,7 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
268 bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, 268 bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
269 walk.nbytes, &ctx->enc, walk.iv); 269 walk.nbytes, &ctx->enc, walk.iv);
270 kernel_neon_end(); 270 kernel_neon_end();
271 err = blkcipher_walk_done(desc, &walk, 0); 271 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
272 } 272 }
273 return err; 273 return err;
274} 274}
@@ -292,7 +292,7 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
292 bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, 292 bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
293 walk.nbytes, &ctx->dec, walk.iv); 293 walk.nbytes, &ctx->dec, walk.iv);
294 kernel_neon_end(); 294 kernel_neon_end();
295 err = blkcipher_walk_done(desc, &walk, 0); 295 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
296 } 296 }
297 return err; 297 return err;
298} 298}
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 060a75e99263..0406cb3f1af7 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -50,6 +50,7 @@ struct machine_desc {
50 struct smp_operations *smp; /* SMP operations */ 50 struct smp_operations *smp; /* SMP operations */
51 bool (*smp_init)(void); 51 bool (*smp_init)(void);
52 void (*fixup)(struct tag *, char **); 52 void (*fixup)(struct tag *, char **);
53 void (*dt_fixup)(void);
53 void (*init_meminfo)(void); 54 void (*init_meminfo)(void);
54 void (*reserve)(void);/* reserve mem blocks */ 55 void (*reserve)(void);/* reserve mem blocks */
55 void (*map_io)(void);/* IO mapping function */ 56 void (*map_io)(void);/* IO mapping function */
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index e94a157ddff1..11c54de9f8cf 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -212,7 +212,7 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
212 mdesc_best = &__mach_desc_GENERIC_DT; 212 mdesc_best = &__mach_desc_GENERIC_DT;
213#endif 213#endif
214 214
215 if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) 215 if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
216 return NULL; 216 return NULL;
217 217
218 mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach); 218 mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
@@ -237,6 +237,12 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
237 dump_machine_table(); /* does not return */ 237 dump_machine_table(); /* does not return */
238 } 238 }
239 239
240 /* We really don't want to do this, but sometimes firmware provides buggy data */
241 if (mdesc->dt_fixup)
242 mdesc->dt_fixup();
243
244 early_init_dt_scan_nodes();
245
240 /* Change machine number to match the mdesc we're using */ 246 /* Change machine number to match the mdesc we're using */
241 __machine_arch_type = mdesc->nr; 247 __machine_arch_type = mdesc->nr;
242 248
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index a5599cfc43cb..2b32978ae905 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -94,13 +94,19 @@ ENTRY(iwmmxt_task_enable)
94 94
95 mrc p15, 0, r2, c2, c0, 0 95 mrc p15, 0, r2, c2, c0, 0
96 mov r2, r2 @ cpwait 96 mov r2, r2 @ cpwait
97 bl concan_save
97 98
98 teq r1, #0 @ test for last ownership 99#ifdef CONFIG_PREEMPT_COUNT
99 mov lr, r9 @ normal exit from exception 100 get_thread_info r10
100 beq concan_load @ no owner, skip save 101#endif
1024: dec_preempt_count r10, r3
103 mov pc, r9 @ normal exit from exception
101 104
102concan_save: 105concan_save:
103 106
107 teq r1, #0 @ test for last ownership
108 beq concan_load @ no owner, skip save
109
104 tmrc r2, wCon 110 tmrc r2, wCon
105 111
106 @ CUP? wCx 112 @ CUP? wCx
@@ -138,7 +144,7 @@ concan_dump:
138 wstrd wR15, [r1, #MMX_WR15] 144 wstrd wR15, [r1, #MMX_WR15]
139 145
1402: teq r0, #0 @ anything to load? 1462: teq r0, #0 @ anything to load?
141 beq 3f 147 moveq pc, lr @ if not, return
142 148
143concan_load: 149concan_load:
144 150
@@ -171,14 +177,9 @@ concan_load:
171 @ clear CUP/MUP (only if r1 != 0) 177 @ clear CUP/MUP (only if r1 != 0)
172 teq r1, #0 178 teq r1, #0
173 mov r2, #0 179 mov r2, #0
174 beq 3f 180 moveq pc, lr
175 tmcr wCon, r2
176 181
1773: 182 tmcr wCon, r2
178#ifdef CONFIG_PREEMPT_COUNT
179 get_thread_info r10
180#endif
1814: dec_preempt_count r10, r3
182 mov pc, lr 183 mov pc, lr
183 184
184/* 185/*
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 778c2f7024ff..a74b53c1b7df 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -160,12 +160,16 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
160static struct undef_hook kgdb_brkpt_hook = { 160static struct undef_hook kgdb_brkpt_hook = {
161 .instr_mask = 0xffffffff, 161 .instr_mask = 0xffffffff,
162 .instr_val = KGDB_BREAKINST, 162 .instr_val = KGDB_BREAKINST,
163 .cpsr_mask = MODE_MASK,
164 .cpsr_val = SVC_MODE,
163 .fn = kgdb_brk_fn 165 .fn = kgdb_brk_fn
164}; 166};
165 167
166static struct undef_hook kgdb_compiled_brkpt_hook = { 168static struct undef_hook kgdb_compiled_brkpt_hook = {
167 .instr_mask = 0xffffffff, 169 .instr_mask = 0xffffffff,
168 .instr_val = KGDB_COMPILED_BREAK, 170 .instr_val = KGDB_COMPILED_BREAK,
171 .cpsr_mask = MODE_MASK,
172 .cpsr_val = SVC_MODE,
169 .fn = kgdb_compiled_brk_fn 173 .fn = kgdb_compiled_brk_fn
170}; 174};
171 175
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 9d853189028b..e35d880f9773 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -275,7 +275,7 @@ void store_cpu_topology(unsigned int cpuid)
275 cpu_topology[cpuid].socket_id, mpidr); 275 cpu_topology[cpuid].socket_id, mpidr);
276} 276}
277 277
278static inline const int cpu_corepower_flags(void) 278static inline int cpu_corepower_flags(void)
279{ 279{
280 return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN; 280 return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
281} 281}
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 46d893fcbe85..66c9b9614f3c 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -335,6 +335,15 @@ static void __init exynos_reserve(void)
335#endif 335#endif
336} 336}
337 337
338static void __init exynos_dt_fixup(void)
339{
340 /*
341 * Some versions of uboot pass garbage entries in the memory node,
342 * use the old CONFIG_ARM_NR_BANKS
343 */
344 of_fdt_limit_memory(8);
345}
346
338DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)") 347DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
339 /* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */ 348 /* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
340 /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */ 349 /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
@@ -348,4 +357,5 @@ DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
348 .dt_compat = exynos_dt_compat, 357 .dt_compat = exynos_dt_compat,
349 .restart = exynos_restart, 358 .restart = exynos_restart,
350 .reserve = exynos_reserve, 359 .reserve = exynos_reserve,
360 .dt_fixup = exynos_dt_fixup,
351MACHINE_END 361MACHINE_END
diff --git a/arch/arm/mach-exynos/hotplug.c b/arch/arm/mach-exynos/hotplug.c
index 8a134d019cb3..920a4baa53cd 100644
--- a/arch/arm/mach-exynos/hotplug.c
+++ b/arch/arm/mach-exynos/hotplug.c
@@ -40,15 +40,17 @@ static inline void cpu_leave_lowpower(void)
40 40
41static inline void platform_do_lowpower(unsigned int cpu, int *spurious) 41static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
42{ 42{
43 u32 mpidr = cpu_logical_map(cpu);
44 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
45
43 for (;;) { 46 for (;;) {
44 47
45 /* make cpu1 to be turned off at next WFI command */ 48 /* Turn the CPU off on next WFI instruction. */
46 if (cpu == 1) 49 exynos_cpu_power_down(core_id);
47 exynos_cpu_power_down(cpu);
48 50
49 wfi(); 51 wfi();
50 52
51 if (pen_release == cpu_logical_map(cpu)) { 53 if (pen_release == core_id) {
52 /* 54 /*
53 * OK, proper wakeup, we're done 55 * OK, proper wakeup, we're done
54 */ 56 */
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 1c8d31e39520..50b9aad5e27b 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -90,7 +90,8 @@ static void exynos_secondary_init(unsigned int cpu)
90static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) 90static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
91{ 91{
92 unsigned long timeout; 92 unsigned long timeout;
93 unsigned long phys_cpu = cpu_logical_map(cpu); 93 u32 mpidr = cpu_logical_map(cpu);
94 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
94 int ret = -ENOSYS; 95 int ret = -ENOSYS;
95 96
96 /* 97 /*
@@ -104,17 +105,18 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
104 * the holding pen - release it, then wait for it to flag 105 * the holding pen - release it, then wait for it to flag
105 * that it has been released by resetting pen_release. 106 * that it has been released by resetting pen_release.
106 * 107 *
107 * Note that "pen_release" is the hardware CPU ID, whereas 108 * Note that "pen_release" is the hardware CPU core ID, whereas
108 * "cpu" is Linux's internal ID. 109 * "cpu" is Linux's internal ID.
109 */ 110 */
110 write_pen_release(phys_cpu); 111 write_pen_release(core_id);
111 112
112 if (!exynos_cpu_power_state(cpu)) { 113 if (!exynos_cpu_power_state(core_id)) {
113 exynos_cpu_power_up(cpu); 114 exynos_cpu_power_up(core_id);
114 timeout = 10; 115 timeout = 10;
115 116
116 /* wait max 10 ms until cpu1 is on */ 117 /* wait max 10 ms until cpu1 is on */
117 while (exynos_cpu_power_state(cpu) != S5P_CORE_LOCAL_PWR_EN) { 118 while (exynos_cpu_power_state(core_id)
119 != S5P_CORE_LOCAL_PWR_EN) {
118 if (timeout-- == 0) 120 if (timeout-- == 0)
119 break; 121 break;
120 122
@@ -145,20 +147,20 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
145 * Try to set boot address using firmware first 147 * Try to set boot address using firmware first
146 * and fall back to boot register if it fails. 148 * and fall back to boot register if it fails.
147 */ 149 */
148 ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr); 150 ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
149 if (ret && ret != -ENOSYS) 151 if (ret && ret != -ENOSYS)
150 goto fail; 152 goto fail;
151 if (ret == -ENOSYS) { 153 if (ret == -ENOSYS) {
152 void __iomem *boot_reg = cpu_boot_reg(phys_cpu); 154 void __iomem *boot_reg = cpu_boot_reg(core_id);
153 155
154 if (IS_ERR(boot_reg)) { 156 if (IS_ERR(boot_reg)) {
155 ret = PTR_ERR(boot_reg); 157 ret = PTR_ERR(boot_reg);
156 goto fail; 158 goto fail;
157 } 159 }
158 __raw_writel(boot_addr, cpu_boot_reg(phys_cpu)); 160 __raw_writel(boot_addr, cpu_boot_reg(core_id));
159 } 161 }
160 162
161 call_firmware_op(cpu_boot, phys_cpu); 163 call_firmware_op(cpu_boot, core_id);
162 164
163 arch_send_wakeup_ipi_mask(cpumask_of(cpu)); 165 arch_send_wakeup_ipi_mask(cpumask_of(cpu));
164 166
@@ -227,22 +229,24 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
227 * boot register if it fails. 229 * boot register if it fails.
228 */ 230 */
229 for (i = 1; i < max_cpus; ++i) { 231 for (i = 1; i < max_cpus; ++i) {
230 unsigned long phys_cpu;
231 unsigned long boot_addr; 232 unsigned long boot_addr;
233 u32 mpidr;
234 u32 core_id;
232 int ret; 235 int ret;
233 236
234 phys_cpu = cpu_logical_map(i); 237 mpidr = cpu_logical_map(i);
238 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
235 boot_addr = virt_to_phys(exynos4_secondary_startup); 239 boot_addr = virt_to_phys(exynos4_secondary_startup);
236 240
237 ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr); 241 ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
238 if (ret && ret != -ENOSYS) 242 if (ret && ret != -ENOSYS)
239 break; 243 break;
240 if (ret == -ENOSYS) { 244 if (ret == -ENOSYS) {
241 void __iomem *boot_reg = cpu_boot_reg(phys_cpu); 245 void __iomem *boot_reg = cpu_boot_reg(core_id);
242 246
243 if (IS_ERR(boot_reg)) 247 if (IS_ERR(boot_reg))
244 break; 248 break;
245 __raw_writel(boot_addr, cpu_boot_reg(phys_cpu)); 249 __raw_writel(boot_addr, cpu_boot_reg(core_id));
246 } 250 }
247 } 251 }
248} 252}
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 8e795dea02ec..8556c787e59c 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -70,7 +70,7 @@ static const char *cko_sels[] = { "cko1", "cko2", };
70static const char *lvds_sels[] = { 70static const char *lvds_sels[] = {
71 "dummy", "dummy", "dummy", "dummy", "dummy", "dummy", 71 "dummy", "dummy", "dummy", "dummy", "dummy", "dummy",
72 "pll4_audio", "pll5_video", "pll8_mlb", "enet_ref", 72 "pll4_audio", "pll5_video", "pll8_mlb", "enet_ref",
73 "pcie_ref", "sata_ref", 73 "pcie_ref_125m", "sata_ref_100m",
74}; 74};
75 75
76enum mx6q_clks { 76enum mx6q_clks {
@@ -491,7 +491,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
491 491
492 /* All existing boards with PCIe use LVDS1 */ 492 /* All existing boards with PCIe use LVDS1 */
493 if (IS_ENABLED(CONFIG_PCI_IMX6)) 493 if (IS_ENABLED(CONFIG_PCI_IMX6))
494 clk_set_parent(clk[lvds1_sel], clk[sata_ref]); 494 clk_set_parent(clk[lvds1_sel], clk[sata_ref_100m]);
495 495
496 /* Set initial power mode */ 496 /* Set initial power mode */
497 imx6q_set_lpm(WAIT_CLOCKED); 497 imx6q_set_lpm(WAIT_CLOCKED);
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 477202fd39cc..2bdc3233abe2 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -292,6 +292,10 @@ static struct notifier_block mvebu_hwcc_nb = {
292 .notifier_call = mvebu_hwcc_notifier, 292 .notifier_call = mvebu_hwcc_notifier,
293}; 293};
294 294
295static struct notifier_block mvebu_hwcc_pci_nb = {
296 .notifier_call = mvebu_hwcc_notifier,
297};
298
295static void __init armada_370_coherency_init(struct device_node *np) 299static void __init armada_370_coherency_init(struct device_node *np)
296{ 300{
297 struct resource res; 301 struct resource res;
@@ -427,7 +431,7 @@ static int __init coherency_pci_init(void)
427{ 431{
428 if (coherency_available()) 432 if (coherency_available())
429 bus_register_notifier(&pci_bus_type, 433 bus_register_notifier(&pci_bus_type,
430 &mvebu_hwcc_nb); 434 &mvebu_hwcc_pci_nb);
431 return 0; 435 return 0;
432} 436}
433 437
diff --git a/arch/arm/mach-mvebu/headsmp-a9.S b/arch/arm/mach-mvebu/headsmp-a9.S
index 5925366bc03c..da5bb292b91c 100644
--- a/arch/arm/mach-mvebu/headsmp-a9.S
+++ b/arch/arm/mach-mvebu/headsmp-a9.S
@@ -15,6 +15,8 @@
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <linux/init.h> 16#include <linux/init.h>
17 17
18#include <asm/assembler.h>
19
18 __CPUINIT 20 __CPUINIT
19#define CPU_RESUME_ADDR_REG 0xf10182d4 21#define CPU_RESUME_ADDR_REG 0xf10182d4
20 22
@@ -22,13 +24,18 @@
22.global armada_375_smp_cpu1_enable_code_end 24.global armada_375_smp_cpu1_enable_code_end
23 25
24armada_375_smp_cpu1_enable_code_start: 26armada_375_smp_cpu1_enable_code_start:
25 ldr r0, [pc, #4] 27ARM_BE8(setend be)
28 adr r0, 1f
29 ldr r0, [r0]
26 ldr r1, [r0] 30 ldr r1, [r0]
31ARM_BE8(rev r1, r1)
27 mov pc, r1 32 mov pc, r1
331:
28 .word CPU_RESUME_ADDR_REG 34 .word CPU_RESUME_ADDR_REG
29armada_375_smp_cpu1_enable_code_end: 35armada_375_smp_cpu1_enable_code_end:
30 36
31ENTRY(mvebu_cortex_a9_secondary_startup) 37ENTRY(mvebu_cortex_a9_secondary_startup)
38ARM_BE8(setend be)
32 bl v7_invalidate_l1 39 bl v7_invalidate_l1
33 b secondary_startup 40 b secondary_startup
34ENDPROC(mvebu_cortex_a9_secondary_startup) 41ENDPROC(mvebu_cortex_a9_secondary_startup)
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index a1d407c0febe..25aa8237d668 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -201,12 +201,12 @@ static noinline int do_armada_370_xp_cpu_suspend(unsigned long deepidle)
201 201
202 /* Test the CR_C bit and set it if it was cleared */ 202 /* Test the CR_C bit and set it if it was cleared */
203 asm volatile( 203 asm volatile(
204 "mrc p15, 0, %0, c1, c0, 0 \n\t" 204 "mrc p15, 0, r0, c1, c0, 0 \n\t"
205 "tst %0, #(1 << 2) \n\t" 205 "tst r0, #(1 << 2) \n\t"
206 "orreq %0, %0, #(1 << 2) \n\t" 206 "orreq r0, r0, #(1 << 2) \n\t"
207 "mcreq p15, 0, %0, c1, c0, 0 \n\t" 207 "mcreq p15, 0, r0, c1, c0, 0 \n\t"
208 "isb " 208 "isb "
209 : : "r" (0)); 209 : : : "r0");
210 210
211 pr_warn("Failed to suspend the system\n"); 211 pr_warn("Failed to suspend the system\n");
212 212
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 17cd39360afe..93914d220069 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -50,6 +50,16 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
50 soc_is_omap54xx() || soc_is_dra7xx()) 50 soc_is_omap54xx() || soc_is_dra7xx())
51 return 1; 51 return 1;
52 52
53 if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
54 ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
55 if (cpu_is_omap24xx())
56 return 0;
57 else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
58 return 0;
59 else
60 return 1;
61 }
62
53 /* OMAP3xxx do not have ELM engine, so cannot support ECC schemes 63 /* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
54 * which require H/W based ECC error detection */ 64 * which require H/W based ECC error detection */
55 if ((cpu_is_omap34xx() || cpu_is_omap3630()) && 65 if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
@@ -57,14 +67,6 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
57 (ecc_opt == OMAP_ECC_BCH8_CODE_HW))) 67 (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
58 return 0; 68 return 0;
59 69
60 /*
61 * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1
62 * and AM33xx derivates. Other chips may be added if confirmed to work.
63 */
64 if ((ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW) &&
65 (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0)))
66 return 0;
67
68 /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */ 70 /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
69 if (ecc_opt == OMAP_ECC_HAM1_CODE_HW) 71 if (ecc_opt == OMAP_ECC_HAM1_CODE_HW)
70 return 1; 72 return 1;
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 539e8106eb96..a0fe747634c1 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -168,6 +168,10 @@ static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
168 smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX; 168 smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
169 break; 169 break;
170 170
171 case L310_POWER_CTRL:
172 pr_info_once("OMAP L2C310: ROM does not support power control setting\n");
173 return;
174
171 default: 175 default:
172 WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg); 176 WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
173 return; 177 return;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4c88935654ca..1f88db06b133 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -461,12 +461,21 @@ void __init dma_contiguous_remap(void)
461 map.type = MT_MEMORY_DMA_READY; 461 map.type = MT_MEMORY_DMA_READY;
462 462
463 /* 463 /*
464 * Clear previous low-memory mapping 464 * Clear previous low-memory mapping to ensure that the
465 * TLB does not see any conflicting entries, then flush
466 * the TLB of the old entries before creating new mappings.
467 *
468 * This ensures that any speculatively loaded TLB entries
469 * (even though they may be rare) can not cause any problems,
470 * and ensures that this code is architecturally compliant.
465 */ 471 */
466 for (addr = __phys_to_virt(start); addr < __phys_to_virt(end); 472 for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
467 addr += PMD_SIZE) 473 addr += PMD_SIZE)
468 pmd_clear(pmd_off_k(addr)); 474 pmd_clear(pmd_off_k(addr));
469 475
476 flush_tlb_kernel_range(__phys_to_virt(start),
477 __phys_to_virt(end));
478
470 iotable_init(&map, 1); 479 iotable_init(&map, 1);
471 } 480 }
472} 481}
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 8e0e52eb76b5..c447ec70e868 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -9,6 +9,11 @@
9#include <asm/sections.h> 9#include <asm/sections.h>
10#include <asm/system_info.h> 10#include <asm/system_info.h>
11 11
12/*
13 * Note: accesses outside of the kernel image and the identity map area
14 * are not supported on any CPU using the idmap tables as its current
15 * page tables.
16 */
12pgd_t *idmap_pgd; 17pgd_t *idmap_pgd;
13phys_addr_t (*arch_virt_to_idmap) (unsigned long x); 18phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
14 19
@@ -25,6 +30,13 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
25 pr_warning("Failed to allocate identity pmd.\n"); 30 pr_warning("Failed to allocate identity pmd.\n");
26 return; 31 return;
27 } 32 }
33 /*
34 * Copy the original PMD to ensure that the PMD entries for
35 * the kernel image are preserved.
36 */
37 if (!pud_none(*pud))
38 memcpy(pmd, pmd_offset(pud, 0),
39 PTRS_PER_PMD * sizeof(pmd_t));
28 pud_populate(&init_mm, pud, pmd); 40 pud_populate(&init_mm, pud, pmd);
29 pmd += pmd_index(addr); 41 pmd += pmd_index(addr);
30 } else 42 } else
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ab14b79b03f0..6e3ba8d112a2 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1406,8 +1406,8 @@ void __init early_paging_init(const struct machine_desc *mdesc,
1406 return; 1406 return;
1407 1407
1408 /* remap kernel code and data */ 1408 /* remap kernel code and data */
1409 map_start = init_mm.start_code; 1409 map_start = init_mm.start_code & PMD_MASK;
1410 map_end = init_mm.brk; 1410 map_end = ALIGN(init_mm.brk, PMD_SIZE);
1411 1411
1412 /* get a handle on things... */ 1412 /* get a handle on things... */
1413 pgd0 = pgd_offset_k(0); 1413 pgd0 = pgd_offset_k(0);
@@ -1442,7 +1442,7 @@ void __init early_paging_init(const struct machine_desc *mdesc,
1442 } 1442 }
1443 1443
1444 /* remap pmds for kernel mapping */ 1444 /* remap pmds for kernel mapping */
1445 phys = __pa(map_start) & PMD_MASK; 1445 phys = __pa(map_start);
1446 do { 1446 do {
1447 *pmdk++ = __pmd(phys | pmdprot); 1447 *pmdk++ = __pmd(phys | pmdprot);
1448 phys += PMD_SIZE; 1448 phys += PMD_SIZE;
diff --git a/arch/arm/xen/grant-table.c b/arch/arm/xen/grant-table.c
index 859a9bb002d5..91cf08ba1e95 100644
--- a/arch/arm/xen/grant-table.c
+++ b/arch/arm/xen/grant-table.c
@@ -51,3 +51,8 @@ int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
51{ 51{
52 return -ENOSYS; 52 return -ENOSYS;
53} 53}
54
55int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
56{
57 return 0;
58}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a474de346be6..839f48c26ef0 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -4,6 +4,7 @@ config ARM64
4 select ARCH_HAS_OPP 4 select ARCH_HAS_OPP
5 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 5 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
6 select ARCH_USE_CMPXCHG_LOCKREF 6 select ARCH_USE_CMPXCHG_LOCKREF
7 select ARCH_SUPPORTS_ATOMIC_RMW
7 select ARCH_WANT_OPTIONAL_GPIOLIB 8 select ARCH_WANT_OPTIONAL_GPIOLIB
8 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION 9 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
9 select ARCH_WANT_FRAME_POINTERS 10 select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 60f2f4c12256..79cd911ef88c 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -106,7 +106,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
106 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 106 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
107 aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 107 aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
108 (u8 *)ctx->key_enc, rounds, blocks, first); 108 (u8 *)ctx->key_enc, rounds, blocks, first);
109 err = blkcipher_walk_done(desc, &walk, 0); 109 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
110 } 110 }
111 kernel_neon_end(); 111 kernel_neon_end();
112 return err; 112 return err;
@@ -128,7 +128,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
128 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 128 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
129 aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 129 aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
130 (u8 *)ctx->key_dec, rounds, blocks, first); 130 (u8 *)ctx->key_dec, rounds, blocks, first);
131 err = blkcipher_walk_done(desc, &walk, 0); 131 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
132 } 132 }
133 kernel_neon_end(); 133 kernel_neon_end();
134 return err; 134 return err;
@@ -151,7 +151,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
151 aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 151 aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
152 (u8 *)ctx->key_enc, rounds, blocks, walk.iv, 152 (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
153 first); 153 first);
154 err = blkcipher_walk_done(desc, &walk, 0); 154 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
155 } 155 }
156 kernel_neon_end(); 156 kernel_neon_end();
157 return err; 157 return err;
@@ -174,7 +174,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
174 aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 174 aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
175 (u8 *)ctx->key_dec, rounds, blocks, walk.iv, 175 (u8 *)ctx->key_dec, rounds, blocks, walk.iv,
176 first); 176 first);
177 err = blkcipher_walk_done(desc, &walk, 0); 177 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
178 } 178 }
179 kernel_neon_end(); 179 kernel_neon_end();
180 return err; 180 return err;
@@ -243,7 +243,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
243 aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 243 aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
244 (u8 *)ctx->key1.key_enc, rounds, blocks, 244 (u8 *)ctx->key1.key_enc, rounds, blocks,
245 (u8 *)ctx->key2.key_enc, walk.iv, first); 245 (u8 *)ctx->key2.key_enc, walk.iv, first);
246 err = blkcipher_walk_done(desc, &walk, 0); 246 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
247 } 247 }
248 kernel_neon_end(); 248 kernel_neon_end();
249 249
@@ -267,7 +267,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
267 aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 267 aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
268 (u8 *)ctx->key1.key_dec, rounds, blocks, 268 (u8 *)ctx->key1.key_dec, rounds, blocks,
269 (u8 *)ctx->key2.key_enc, walk.iv, first); 269 (u8 *)ctx->key2.key_enc, walk.iv, first);
270 err = blkcipher_walk_done(desc, &walk, 0); 270 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
271 } 271 }
272 kernel_neon_end(); 272 kernel_neon_end();
273 273
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index 60e98a639ac5..e786e6cdc400 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -12,8 +12,6 @@
12#include <linux/efi.h> 12#include <linux/efi.h>
13#include <linux/libfdt.h> 13#include <linux/libfdt.h>
14#include <asm/sections.h> 14#include <asm/sections.h>
15#include <generated/compile.h>
16#include <generated/utsrelease.h>
17 15
18/* 16/*
19 * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from 17 * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f43db8a69262..e90c5426fe14 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -60,6 +60,17 @@ static int __init early_initrd(char *p)
60early_param("initrd", early_initrd); 60early_param("initrd", early_initrd);
61#endif 61#endif
62 62
63/*
64 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
65 * currently assumes that for memory starting above 4G, 32-bit devices will
66 * use a DMA offset.
67 */
68static phys_addr_t max_zone_dma_phys(void)
69{
70 phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
71 return min(offset + (1ULL << 32), memblock_end_of_DRAM());
72}
73
63static void __init zone_sizes_init(unsigned long min, unsigned long max) 74static void __init zone_sizes_init(unsigned long min, unsigned long max)
64{ 75{
65 struct memblock_region *reg; 76 struct memblock_region *reg;
@@ -70,9 +81,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
70 81
71 /* 4GB maximum for 32-bit only capable devices */ 82 /* 4GB maximum for 32-bit only capable devices */
72 if (IS_ENABLED(CONFIG_ZONE_DMA)) { 83 if (IS_ENABLED(CONFIG_ZONE_DMA)) {
73 unsigned long max_dma_phys = 84 max_dma = PFN_DOWN(max_zone_dma_phys());
74 (unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1);
75 max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
76 zone_size[ZONE_DMA] = max_dma - min; 85 zone_size[ZONE_DMA] = max_dma - min;
77 } 86 }
78 zone_size[ZONE_NORMAL] = max - max_dma; 87 zone_size[ZONE_NORMAL] = max - max_dma;
@@ -146,7 +155,7 @@ void __init arm64_memblock_init(void)
146 155
147 /* 4GB maximum for 32-bit only capable devices */ 156 /* 4GB maximum for 32-bit only capable devices */
148 if (IS_ENABLED(CONFIG_ZONE_DMA)) 157 if (IS_ENABLED(CONFIG_ZONE_DMA))
149 dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1; 158 dma_phys_limit = max_zone_dma_phys();
150 dma_contiguous_reserve(dma_phys_limit); 159 dma_contiguous_reserve(dma_phys_limit);
151 160
152 memblock_allow_resize(); 161 memblock_allow_resize();
diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
index a7e9bfd84183..fcec5ce71392 100644
--- a/arch/blackfin/configs/BF609-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
@@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y
102CONFIG_I2C_BLACKFIN_TWI=y 102CONFIG_I2C_BLACKFIN_TWI=y
103CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 103CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
104CONFIG_SPI=y 104CONFIG_SPI=y
105CONFIG_SPI_BFIN_V3=y 105CONFIG_SPI_ADI_V3=y
106CONFIG_GPIOLIB=y 106CONFIG_GPIOLIB=y
107CONFIG_GPIO_SYSFS=y 107CONFIG_GPIO_SYSFS=y
108# CONFIG_HWMON is not set 108# CONFIG_HWMON is not set
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index ba35864b2b74..c9eec84aa258 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -145,7 +145,7 @@ SECTIONS
145 145
146 .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data)) 146 .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
147#else 147#else
148 .init.data : AT(__data_lma + __data_len) 148 .init.data : AT(__data_lma + __data_len + 32)
149 { 149 {
150 __sinitdata = .; 150 __sinitdata = .;
151 INIT_DATA 151 INIT_DATA
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 63b0e4fe760c..0ccf0cf4daaf 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -20,6 +20,7 @@
20#include <linux/spi/spi.h> 20#include <linux/spi/spi.h>
21#include <linux/spi/flash.h> 21#include <linux/spi/flash.h>
22#include <linux/irq.h> 22#include <linux/irq.h>
23#include <linux/gpio.h>
23#include <linux/i2c.h> 24#include <linux/i2c.h>
24#include <asm/dma.h> 25#include <asm/dma.h>
25#include <asm/bfin5xx_spi.h> 26#include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index c65c6dbda3da..1e7290ef3525 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -21,6 +21,7 @@
21#endif 21#endif
22#include <linux/ata_platform.h> 22#include <linux/ata_platform.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/gpio.h>
24#include <asm/dma.h> 25#include <asm/dma.h>
25#include <asm/bfin5xx_spi.h> 26#include <asm/bfin5xx_spi.h>
26#include <asm/portmux.h> 27#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index af58454b4bff..c7495dc74690 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -21,6 +21,7 @@
21#endif 21#endif
22#include <linux/ata_platform.h> 22#include <linux/ata_platform.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/gpio.h>
24#include <asm/dma.h> 25#include <asm/dma.h>
25#include <asm/bfin5xx_spi.h> 26#include <asm/bfin5xx_spi.h>
26#include <asm/portmux.h> 27#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index a0211225748d..6b988ad653d8 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -21,6 +21,7 @@
21#endif 21#endif
22#include <linux/ata_platform.h> 22#include <linux/ata_platform.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/gpio.h>
24#include <asm/dma.h> 25#include <asm/dma.h>
25#include <asm/bfin5xx_spi.h> 26#include <asm/bfin5xx_spi.h>
26#include <asm/portmux.h> 27#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 90138e6112c1..1fe7ff286619 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
2118 PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"), 2118 PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
2119 PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"), 2119 PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
2120 PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1", "pinctrl-adi2.0", NULL, "can1"), 2120 PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1", "pinctrl-adi2.0", NULL, "can1"),
2121 PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", NULL, "ppi0_24b"), 2121 PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
2122 PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"), 2122 PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
2123 PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"), 2123 PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
2124 PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0", "pinctrl-adi2.0", NULL, "sport0"), 2124 PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0", "pinctrl-adi2.0", NULL, "sport0"),
@@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
2140 PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi_alter"), 2140 PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi_alter"),
2141#endif 2141#endif
2142 PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0", "pinctrl-adi2.0", NULL, "nfc0"), 2142 PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0", "pinctrl-adi2.0", NULL, "nfc0"),
2143 PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", NULL, "keys_4x4"), 2143 PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
2144 PIN_MAP_MUX_GROUP("bf54x-keys", "4bit", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
2145 PIN_MAP_MUX_GROUP("bf54x-keys", "8bit", "pinctrl-adi2.0", "keys_8x8grp", "keys"),
2144}; 2146};
2145 2147
2146static int __init ezkit_init(void) 2148static int __init ezkit_init(void)
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 430b16d5ccb1..6ab951534d79 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -44,6 +44,7 @@
44#include <linux/spi/flash.h> 44#include <linux/spi/flash.h>
45#include <linux/irq.h> 45#include <linux/irq.h>
46#include <linux/interrupt.h> 46#include <linux/interrupt.h>
47#include <linux/gpio.h>
47#include <linux/jiffies.h> 48#include <linux/jiffies.h>
48#include <linux/i2c-pca-platform.h> 49#include <linux/i2c-pca-platform.h>
49#include <linux/delay.h> 50#include <linux/delay.h>
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 9f777df4cacc..e862f7823e68 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -18,6 +18,7 @@
18#endif 18#endif
19#include <linux/ata_platform.h> 19#include <linux/ata_platform.h>
20#include <linux/irq.h> 20#include <linux/irq.h>
21#include <linux/gpio.h>
21#include <asm/dma.h> 22#include <asm/dma.h>
22#include <asm/bfin5xx_spi.h> 23#include <asm/bfin5xx_spi.h>
23#include <asm/portmux.h> 24#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 88dee43e7abe..2de71e8c104b 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -14,6 +14,7 @@
14#include <linux/spi/spi.h> 14#include <linux/spi/spi.h>
15#include <linux/irq.h> 15#include <linux/irq.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/gpio.h>
17#include <linux/delay.h> 18#include <linux/delay.h>
18#include <asm/dma.h> 19#include <asm/dma.h>
19#include <asm/bfin5xx_spi.h> 20#include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index 1ba4600de69f..e2c0b024ce88 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
698{ 698{
699#define CONFIG_SMC_GCTL_VAL 0x00000010 699#define CONFIG_SMC_GCTL_VAL 0x00000010
700 700
701 if (!devm_pinctrl_get_select_default(&pdev->dev))
702 return -EBUSY;
703 bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL); 701 bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
704 bfin_write32(SMC_B0CTL, 0x01002011); 702 bfin_write32(SMC_B0CTL, 0x01002011);
705 bfin_write32(SMC_B0TIM, 0x08170977); 703 bfin_write32(SMC_B0TIM, 0x08170977);
@@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
709 707
710void bf609_nor_flash_exit(struct platform_device *pdev) 708void bf609_nor_flash_exit(struct platform_device *pdev)
711{ 709{
712 devm_pinctrl_put(pdev->dev.pins->p);
713 bfin_write32(SMC_GCTL, 0); 710 bfin_write32(SMC_GCTL, 0);
714} 711}
715 712
@@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
2058 PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"), 2055 PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
2059 PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"), 2056 PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
2060 PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"), 2057 PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"),
2061 PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", NULL, "ppi2_16b"), 2058 PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
2062 PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", NULL, "ppi0_16b"), 2059 PIN_MAP_MUX_GROUP("bfin_display.0", "8bit", "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
2063#if IS_ENABLED(CONFIG_VIDEO_MT9M114) 2060 PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
2064 PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_8b"), 2061 PIN_MAP_MUX_GROUP("bfin_display.0", "16bit", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
2065#elif IS_ENABLED(CONFIG_VIDEO_VS6624) 2062 PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit", "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
2066 PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_16b"), 2063 PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
2067#else 2064 PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
2068 PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_24b"), 2065 PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
2069#endif
2070 PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"), 2066 PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
2071 PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"), 2067 PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
2072 PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"), 2068 PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"),
diff --git a/arch/blackfin/mach-bf609/include/mach/pm.h b/arch/blackfin/mach-bf609/include/mach/pm.h
index 3ca0fb965636..a1efd936dd30 100644
--- a/arch/blackfin/mach-bf609/include/mach/pm.h
+++ b/arch/blackfin/mach-bf609/include/mach/pm.h
@@ -10,6 +10,7 @@
10#define __MACH_BF609_PM_H__ 10#define __MACH_BF609_PM_H__
11 11
12#include <linux/suspend.h> 12#include <linux/suspend.h>
13#include <linux/platform_device.h>
13 14
14extern int bfin609_pm_enter(suspend_state_t state); 15extern int bfin609_pm_enter(suspend_state_t state);
15extern int bf609_pm_prepare(void); 16extern int bf609_pm_prepare(void);
@@ -19,6 +20,6 @@ void bf609_hibernate(void);
19void bfin_sec_raise_irq(unsigned int sid); 20void bfin_sec_raise_irq(unsigned int sid);
20void coreb_enable(void); 21void coreb_enable(void);
21 22
22int bf609_nor_flash_init(void); 23int bf609_nor_flash_init(struct platform_device *pdev);
23void bf609_nor_flash_exit(void); 24void bf609_nor_flash_exit(struct platform_device *pdev);
24#endif 25#endif
diff --git a/arch/blackfin/mach-bf609/pm.c b/arch/blackfin/mach-bf609/pm.c
index 0cdd6955c7be..b1bfcf434d16 100644
--- a/arch/blackfin/mach-bf609/pm.c
+++ b/arch/blackfin/mach-bf609/pm.c
@@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = {
291#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 291#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
292static int smc_pm_syscore_suspend(void) 292static int smc_pm_syscore_suspend(void)
293{ 293{
294 bf609_nor_flash_exit(); 294 bf609_nor_flash_exit(NULL);
295 return 0; 295 return 0;
296} 296}
297 297
298static void smc_pm_syscore_resume(void) 298static void smc_pm_syscore_resume(void)
299{ 299{
300 bf609_nor_flash_init(); 300 bf609_nor_flash_init(NULL);
301} 301}
302 302
303static struct syscore_ops smc_pm_syscore_ops = { 303static struct syscore_ops smc_pm_syscore_ops = {
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 867b7cef204c..1f94784eab6d 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1208,8 +1208,6 @@ int __init init_arch_irq(void)
1208 1208
1209 bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority); 1209 bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
1210 1210
1211 bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
1212
1213 /* Enable interrupts IVG7-15 */ 1211 /* Enable interrupts IVG7-15 */
1214 bfin_irq_flags |= IMASK_IVG15 | 1212 bfin_irq_flags |= IMASK_IVG15 |
1215 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | 1213 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index a2fa297196bc..f5645d6a89f2 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -69,8 +69,6 @@
69#define SA_NOMASK SA_NODEFER 69#define SA_NOMASK SA_NODEFER
70#define SA_ONESHOT SA_RESETHAND 70#define SA_ONESHOT SA_RESETHAND
71 71
72#define SA_RESTORER 0x04000000 /* obsolete -- ignored */
73
74#define MINSIGSTKSZ 2048 72#define MINSIGSTKSZ 2048
75#define SIGSTKSZ 8192 73#define SIGSTKSZ 8192
76 74
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index ae085ad0fba0..0bef864264c0 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -728,7 +728,6 @@ static void __init pagetable_init(void)
728#endif 728#endif
729 729
730 empty_zero_page = alloc_bootmem_pages(PAGE_SIZE); 730 empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
731 memset(empty_zero_page, 0, PAGE_SIZE);
732} 731}
733 732
734static void __init gateway_init(void) 733static void __init gateway_init(void)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index fefe7c8bf05f..80b94b0add1f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -145,6 +145,7 @@ config PPC
145 select HAVE_IRQ_EXIT_ON_IRQ_STACK 145 select HAVE_IRQ_EXIT_ON_IRQ_STACK
146 select ARCH_USE_CMPXCHG_LOCKREF if PPC64 146 select ARCH_USE_CMPXCHG_LOCKREF if PPC64
147 select HAVE_ARCH_AUDITSYSCALL 147 select HAVE_ARCH_AUDITSYSCALL
148 select ARCH_SUPPORTS_ATOMIC_RMW
148 149
149config GENERIC_CSUM 150config GENERIC_CSUM
150 def_bool CPU_LITTLE_ENDIAN 151 def_bool CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index bc2347774f0a..0fdd7eece6d9 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -447,6 +447,7 @@ extern const char *powerpc_base_platform;
447 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ 447 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
448 CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP) 448 CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
449#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG) 449#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
450#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
450#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 451#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
451 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 452 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
452 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ 453 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index fddb72b48ce9..d645428a65a4 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
198 return rb; 198 return rb;
199} 199}
200 200
201static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) 201static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
202 bool is_base_size)
202{ 203{
204
203 int size, a_psize; 205 int size, a_psize;
204 /* Look at the 8 bit LP value */ 206 /* Look at the 8 bit LP value */
205 unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1); 207 unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
214 continue; 216 continue;
215 217
216 a_psize = __hpte_actual_psize(lp, size); 218 a_psize = __hpte_actual_psize(lp, size);
217 if (a_psize != -1) 219 if (a_psize != -1) {
220 if (is_base_size)
221 return 1ul << mmu_psize_defs[size].shift;
218 return 1ul << mmu_psize_defs[a_psize].shift; 222 return 1ul << mmu_psize_defs[a_psize].shift;
223 }
219 } 224 }
220 225
221 } 226 }
222 return 0; 227 return 0;
223} 228}
224 229
230static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
231{
232 return __hpte_page_size(h, l, 0);
233}
234
235static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
236{
237 return __hpte_page_size(h, l, 1);
238}
239
225static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize) 240static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
226{ 241{
227 return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT; 242 return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 807014dde821..c2b4dcf23d03 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -22,6 +22,7 @@
22 */ 22 */
23#include <asm/pgtable-ppc64.h> 23#include <asm/pgtable-ppc64.h>
24#include <asm/bug.h> 24#include <asm/bug.h>
25#include <asm/processor.h>
25 26
26/* 27/*
27 * Segment table 28 * Segment table
@@ -496,7 +497,7 @@ extern void slb_set_size(u16 size);
496 */ 497 */
497struct subpage_prot_table { 498struct subpage_prot_table {
498 unsigned long maxaddr; /* only addresses < this are protected */ 499 unsigned long maxaddr; /* only addresses < this are protected */
499 unsigned int **protptrs[2]; 500 unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
500 unsigned int *low_prot[4]; 501 unsigned int *low_prot[4];
501}; 502};
502 503
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 9ea266eae33e..7e4612528546 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -277,6 +277,8 @@ n:
277 .globl n; \ 277 .globl n; \
278n: 278n:
279 279
280#define _GLOBAL_TOC(name) _GLOBAL(name)
281
280#define _KPROBE(n) \ 282#define _KPROBE(n) \
281 .section ".kprobes.text","a"; \ 283 .section ".kprobes.text","a"; \
282 .globl n; \ 284 .globl n; \
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 965291b4c2fa..0c157642c2a1 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
527 .machine_check_early = __machine_check_early_realmode_p8, 527 .machine_check_early = __machine_check_early_realmode_p8,
528 .platform = "power8", 528 .platform = "power8",
529 }, 529 },
530 { /* Power8 DD1: Does not support doorbell IPIs */
531 .pvr_mask = 0xffffff00,
532 .pvr_value = 0x004d0100,
533 .cpu_name = "POWER8 (raw)",
534 .cpu_features = CPU_FTRS_POWER8_DD1,
535 .cpu_user_features = COMMON_USER_POWER8,
536 .cpu_user_features2 = COMMON_USER2_POWER8,
537 .mmu_features = MMU_FTRS_POWER8,
538 .icache_bsize = 128,
539 .dcache_bsize = 128,
540 .num_pmcs = 6,
541 .pmc_type = PPC_PMC_IBM,
542 .oprofile_cpu_type = "ppc64/power8",
543 .oprofile_type = PPC_OPROFILE_INVALID,
544 .cpu_setup = __setup_cpu_power8,
545 .cpu_restore = __restore_cpu_power8,
546 .flush_tlb = __flush_tlb_power8,
547 .machine_check_early = __machine_check_early_realmode_p8,
548 .platform = "power8",
549 },
530 { /* Power8 */ 550 { /* Power8 */
531 .pvr_mask = 0xffff0000, 551 .pvr_mask = 0xffff0000,
532 .pvr_value = 0x004d0000, 552 .pvr_value = 0x004d0000,
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 658e89d2025b..db2b482af658 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -611,17 +611,19 @@ static void rtas_flash_firmware(int reboot_type)
611 for (f = flist; f; f = next) { 611 for (f = flist; f; f = next) {
612 /* Translate data addrs to absolute */ 612 /* Translate data addrs to absolute */
613 for (i = 0; i < f->num_blocks; i++) { 613 for (i = 0; i < f->num_blocks; i++) {
614 f->blocks[i].data = (char *)__pa(f->blocks[i].data); 614 f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));
615 image_size += f->blocks[i].length; 615 image_size += f->blocks[i].length;
616 f->blocks[i].length = cpu_to_be64(f->blocks[i].length);
616 } 617 }
617 next = f->next; 618 next = f->next;
618 /* Don't translate NULL pointer for last entry */ 619 /* Don't translate NULL pointer for last entry */
619 if (f->next) 620 if (f->next)
620 f->next = (struct flash_block_list *)__pa(f->next); 621 f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next));
621 else 622 else
622 f->next = NULL; 623 f->next = NULL;
623 /* make num_blocks into the version/length field */ 624 /* make num_blocks into the version/length field */
624 f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16); 625 f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
626 f->num_blocks = cpu_to_be64(f->num_blocks);
625 } 627 }
626 628
627 printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size); 629 printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 51a3ff78838a..1007fb802e6b 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -747,7 +747,7 @@ int setup_profiling_timer(unsigned int multiplier)
747 747
748#ifdef CONFIG_SCHED_SMT 748#ifdef CONFIG_SCHED_SMT
749/* cpumask of CPUs with asymetric SMT dependancy */ 749/* cpumask of CPUs with asymetric SMT dependancy */
750static const int powerpc_smt_flags(void) 750static int powerpc_smt_flags(void)
751{ 751{
752 int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; 752 int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
753 753
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 80561074078d..68468d695f12 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
1562 goto out; 1562 goto out;
1563 } 1563 }
1564 if (!rma_setup && is_vrma_hpte(v)) { 1564 if (!rma_setup && is_vrma_hpte(v)) {
1565 unsigned long psize = hpte_page_size(v, r); 1565 unsigned long psize = hpte_base_page_size(v, r);
1566 unsigned long senc = slb_pgsize_encoding(psize); 1566 unsigned long senc = slb_pgsize_encoding(psize);
1567 unsigned long lpcr; 1567 unsigned long lpcr;
1568 1568
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6e6224318c36..5a24d3c2b6b8 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
814 r = hpte[i+1]; 814 r = hpte[i+1];
815 815
816 /* 816 /*
817 * Check the HPTE again, including large page size 817 * Check the HPTE again, including base page size
818 * Since we don't currently allow any MPSS (mixed
819 * page-size segment) page sizes, it is sufficient
820 * to check against the actual page size.
821 */ 818 */
822 if ((v & valid) && (v & mask) == val && 819 if ((v & valid) && (v & mask) == val &&
823 hpte_page_size(v, r) == (1ul << pshift)) 820 hpte_base_page_size(v, r) == (1ul << pshift))
824 /* Return with the HPTE still locked */ 821 /* Return with the HPTE still locked */
825 return (hash << 3) + (i >> 1); 822 return (hash << 3) + (i >> 1);
826 823
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 868347ef09fd..558a67df8126 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -48,7 +48,7 @@
48 * 48 *
49 * LR = return address to continue at after eventually re-enabling MMU 49 * LR = return address to continue at after eventually re-enabling MMU
50 */ 50 */
51_GLOBAL(kvmppc_hv_entry_trampoline) 51_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
52 mflr r0 52 mflr r0
53 std r0, PPC_LR_STKOFF(r1) 53 std r0, PPC_LR_STKOFF(r1)
54 stdu r1, -112(r1) 54 stdu r1, -112(r1)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index e2c29e381dc7..d044b8b7c69d 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,7 +25,11 @@
25#include <asm/exception-64s.h> 25#include <asm/exception-64s.h>
26 26
27#if defined(CONFIG_PPC_BOOK3S_64) 27#if defined(CONFIG_PPC_BOOK3S_64)
28#if defined(_CALL_ELF) && _CALL_ELF == 2
29#define FUNC(name) name
30#else
28#define FUNC(name) GLUE(.,name) 31#define FUNC(name) GLUE(.,name)
32#endif
29#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU 33#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU
30 34
31#elif defined(CONFIG_PPC_BOOK3S_32) 35#elif defined(CONFIG_PPC_BOOK3S_32)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 9eec675220e6..16c4d88ba27d 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,7 +36,11 @@
36 36
37#if defined(CONFIG_PPC_BOOK3S_64) 37#if defined(CONFIG_PPC_BOOK3S_64)
38 38
39#if defined(_CALL_ELF) && _CALL_ELF == 2
40#define FUNC(name) name
41#else
39#define FUNC(name) GLUE(.,name) 42#define FUNC(name) GLUE(.,name)
43#endif
40 44
41#elif defined(CONFIG_PPC_BOOK3S_32) 45#elif defined(CONFIG_PPC_BOOK3S_32)
42 46
@@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
146 * On entry, r4 contains the guest shadow MSR 150 * On entry, r4 contains the guest shadow MSR
147 * MSR.EE has to be 0 when calling this function 151 * MSR.EE has to be 0 when calling this function
148 */ 152 */
149_GLOBAL(kvmppc_entry_trampoline) 153_GLOBAL_TOC(kvmppc_entry_trampoline)
150 mfmsr r5 154 mfmsr r5
151 LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter) 155 LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
152 toreal(r7) 156 toreal(r7)
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index edb14ba992b3..ef27fbd5d9c5 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
23 u32 irq, server, priority; 23 u32 irq, server, priority;
24 int rc; 24 int rc;
25 25
26 if (args->nargs != 3 || args->nret != 1) { 26 if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
27 rc = -3; 27 rc = -3;
28 goto out; 28 goto out;
29 } 29 }
30 30
31 irq = args->args[0]; 31 irq = be32_to_cpu(args->args[0]);
32 server = args->args[1]; 32 server = be32_to_cpu(args->args[1]);
33 priority = args->args[2]; 33 priority = be32_to_cpu(args->args[2]);
34 34
35 rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority); 35 rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
36 if (rc) 36 if (rc)
37 rc = -3; 37 rc = -3;
38out: 38out:
39 args->rets[0] = rc; 39 args->rets[0] = cpu_to_be32(rc);
40} 40}
41 41
42static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) 42static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
44 u32 irq, server, priority; 44 u32 irq, server, priority;
45 int rc; 45 int rc;
46 46
47 if (args->nargs != 1 || args->nret != 3) { 47 if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
48 rc = -3; 48 rc = -3;
49 goto out; 49 goto out;
50 } 50 }
51 51
52 irq = args->args[0]; 52 irq = be32_to_cpu(args->args[0]);
53 53
54 server = priority = 0; 54 server = priority = 0;
55 rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority); 55 rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
58 goto out; 58 goto out;
59 } 59 }
60 60
61 args->rets[1] = server; 61 args->rets[1] = cpu_to_be32(server);
62 args->rets[2] = priority; 62 args->rets[2] = cpu_to_be32(priority);
63out: 63out:
64 args->rets[0] = rc; 64 args->rets[0] = cpu_to_be32(rc);
65} 65}
66 66
67static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args) 67static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
69 u32 irq; 69 u32 irq;
70 int rc; 70 int rc;
71 71
72 if (args->nargs != 1 || args->nret != 1) { 72 if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
73 rc = -3; 73 rc = -3;
74 goto out; 74 goto out;
75 } 75 }
76 76
77 irq = args->args[0]; 77 irq = be32_to_cpu(args->args[0]);
78 78
79 rc = kvmppc_xics_int_off(vcpu->kvm, irq); 79 rc = kvmppc_xics_int_off(vcpu->kvm, irq);
80 if (rc) 80 if (rc)
81 rc = -3; 81 rc = -3;
82out: 82out:
83 args->rets[0] = rc; 83 args->rets[0] = cpu_to_be32(rc);
84} 84}
85 85
86static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args) 86static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
88 u32 irq; 88 u32 irq;
89 int rc; 89 int rc;
90 90
91 if (args->nargs != 1 || args->nret != 1) { 91 if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
92 rc = -3; 92 rc = -3;
93 goto out; 93 goto out;
94 } 94 }
95 95
96 irq = args->args[0]; 96 irq = be32_to_cpu(args->args[0]);
97 97
98 rc = kvmppc_xics_int_on(vcpu->kvm, irq); 98 rc = kvmppc_xics_int_on(vcpu->kvm, irq);
99 if (rc) 99 if (rc)
100 rc = -3; 100 rc = -3;
101out: 101out:
102 args->rets[0] = rc; 102 args->rets[0] = cpu_to_be32(rc);
103} 103}
104#endif /* CONFIG_KVM_XICS */ 104#endif /* CONFIG_KVM_XICS */
105 105
@@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
205 return rc; 205 return rc;
206} 206}
207 207
208static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
209{
210#ifdef __LITTLE_ENDIAN__
211 int i;
212
213 args->token = be32_to_cpu(args->token);
214 args->nargs = be32_to_cpu(args->nargs);
215 args->nret = be32_to_cpu(args->nret);
216 for (i = 0; i < args->nargs; i++)
217 args->args[i] = be32_to_cpu(args->args[i]);
218#endif
219}
220
221static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
222{
223#ifdef __LITTLE_ENDIAN__
224 int i;
225
226 for (i = 0; i < args->nret; i++)
227 args->args[i] = cpu_to_be32(args->args[i]);
228 args->token = cpu_to_be32(args->token);
229 args->nargs = cpu_to_be32(args->nargs);
230 args->nret = cpu_to_be32(args->nret);
231#endif
232}
233
234int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) 208int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
235{ 209{
236 struct rtas_token_definition *d; 210 struct rtas_token_definition *d;
@@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
249 if (rc) 223 if (rc)
250 goto fail; 224 goto fail;
251 225
252 kvmppc_rtas_swap_endian_in(&args);
253
254 /* 226 /*
255 * args->rets is a pointer into args->args. Now that we've 227 * args->rets is a pointer into args->args. Now that we've
256 * copied args we need to fix it up to point into our copy, 228 * copied args we need to fix it up to point into our copy,
@@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
258 * value so we can restore it on the way out. 230 * value so we can restore it on the way out.
259 */ 231 */
260 orig_rets = args.rets; 232 orig_rets = args.rets;
261 args.rets = &args.args[args.nargs]; 233 args.rets = &args.args[be32_to_cpu(args.nargs)];
262 234
263 mutex_lock(&vcpu->kvm->lock); 235 mutex_lock(&vcpu->kvm->lock);
264 236
265 rc = -ENOENT; 237 rc = -ENOENT;
266 list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) { 238 list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
267 if (d->token == args.token) { 239 if (d->token == be32_to_cpu(args.token)) {
268 d->handler->handler(vcpu, &args); 240 d->handler->handler(vcpu, &args);
269 rc = 0; 241 rc = 0;
270 break; 242 break;
@@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
275 247
276 if (rc == 0) { 248 if (rc == 0) {
277 args.rets = orig_rets; 249 args.rets = orig_rets;
278 kvmppc_rtas_swap_endian_out(&args);
279 rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args)); 250 rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
280 if (rc) 251 if (rc)
281 goto fail; 252 goto fail;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index dd2cc03f406f..86903d3f5a03 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
473 if (printk_ratelimit()) 473 if (printk_ratelimit())
474 pr_err("%s: pte not present: gfn %lx, pfn %lx\n", 474 pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
475 __func__, (long)gfn, pfn); 475 __func__, (long)gfn, pfn);
476 return -EINVAL; 476 ret = -EINVAL;
477 goto out;
477 } 478 }
478 kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); 479 kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
479 480
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 0738f96befbf..43435c6892fb 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -77,7 +77,7 @@ _GLOBAL(memset)
77 stb r4,0(r6) 77 stb r4,0(r6)
78 blr 78 blr
79 79
80_GLOBAL(memmove) 80_GLOBAL_TOC(memmove)
81 cmplw 0,r3,r4 81 cmplw 0,r3,r4
82 bgt backwards_memcpy 82 bgt backwards_memcpy
83 b memcpy 83 b memcpy
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 412dd46dd0b7..5c09f365c842 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1198 sh = regs->gpr[rb] & 0x3f; 1198 sh = regs->gpr[rb] & 0x3f;
1199 ival = (signed int) regs->gpr[rd]; 1199 ival = (signed int) regs->gpr[rd];
1200 regs->gpr[ra] = ival >> (sh < 32 ? sh : 31); 1200 regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
1201 if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0)) 1201 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
1202 regs->xer |= XER_CA; 1202 regs->xer |= XER_CA;
1203 else 1203 else
1204 regs->xer &= ~XER_CA; 1204 regs->xer &= ~XER_CA;
@@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1208 sh = rb; 1208 sh = rb;
1209 ival = (signed int) regs->gpr[rd]; 1209 ival = (signed int) regs->gpr[rd];
1210 regs->gpr[ra] = ival >> sh; 1210 regs->gpr[ra] = ival >> sh;
1211 if (ival < 0 && (ival & ((1 << sh) - 1)) != 0) 1211 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1212 regs->xer |= XER_CA; 1212 regs->xer |= XER_CA;
1213 else 1213 else
1214 regs->xer &= ~XER_CA; 1214 regs->xer &= ~XER_CA;
@@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1216 1216
1217#ifdef __powerpc64__ 1217#ifdef __powerpc64__
1218 case 27: /* sld */ 1218 case 27: /* sld */
1219 sh = regs->gpr[rd] & 0x7f; 1219 sh = regs->gpr[rb] & 0x7f;
1220 if (sh < 64) 1220 if (sh < 64)
1221 regs->gpr[ra] = regs->gpr[rd] << sh; 1221 regs->gpr[ra] = regs->gpr[rd] << sh;
1222 else 1222 else
@@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1235 sh = regs->gpr[rb] & 0x7f; 1235 sh = regs->gpr[rb] & 0x7f;
1236 ival = (signed long int) regs->gpr[rd]; 1236 ival = (signed long int) regs->gpr[rd];
1237 regs->gpr[ra] = ival >> (sh < 64 ? sh : 63); 1237 regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
1238 if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0)) 1238 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
1239 regs->xer |= XER_CA; 1239 regs->xer |= XER_CA;
1240 else 1240 else
1241 regs->xer &= ~XER_CA; 1241 regs->xer &= ~XER_CA;
@@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1246 sh = rb | ((instr & 2) << 4); 1246 sh = rb | ((instr & 2) << 4);
1247 ival = (signed long int) regs->gpr[rd]; 1247 ival = (signed long int) regs->gpr[rd];
1248 regs->gpr[ra] = ival >> sh; 1248 regs->gpr[ra] = ival >> sh;
1249 if (ival < 0 && (ival & ((1 << sh) - 1)) != 0) 1249 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1250 regs->xer |= XER_CA; 1250 regs->xer |= XER_CA;
1251 else 1251 else
1252 regs->xer &= ~XER_CA; 1252 regs->xer &= ~XER_CA;
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 6dcdadefd8d0..82e82cadcde5 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -390,12 +390,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
390 case BPF_ANC | SKF_AD_VLAN_TAG: 390 case BPF_ANC | SKF_AD_VLAN_TAG:
391 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: 391 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
392 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 392 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
393 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
394
393 PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, 395 PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
394 vlan_tci)); 396 vlan_tci));
395 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) 397 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
396 PPC_ANDI(r_A, r_A, VLAN_VID_MASK); 398 PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
397 else 399 } else {
398 PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT); 400 PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
401 PPC_SRWI(r_A, r_A, 12);
402 }
399 break; 403 break;
400 case BPF_ANC | SKF_AD_QUEUE: 404 case BPF_ANC | SKF_AD_QUEUE:
401 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 405 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 6b0641c3f03f..fe52db2eea6a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1307,6 +1307,9 @@ static void power_pmu_enable(struct pmu *pmu)
1307 out_enable: 1307 out_enable:
1308 pmao_restore_workaround(ebb); 1308 pmao_restore_workaround(ebb);
1309 1309
1310 if (ppmu->flags & PPMU_ARCH_207S)
1311 mtspr(SPRN_MMCR2, 0);
1312
1310 mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); 1313 mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
1311 1314
1312 mb(); 1315 mb();
@@ -1315,9 +1318,6 @@ static void power_pmu_enable(struct pmu *pmu)
1315 1318
1316 write_mmcr0(cpuhw, mmcr0); 1319 write_mmcr0(cpuhw, mmcr0);
1317 1320
1318 if (ppmu->flags & PPMU_ARCH_207S)
1319 mtspr(SPRN_MMCR2, 0);
1320
1321 /* 1321 /*
1322 * Enable instruction sampling if necessary 1322 * Enable instruction sampling if necessary
1323 */ 1323 */
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 10268c41d830..0ad533b617f7 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -249,7 +249,7 @@ static void elog_work_fn(struct work_struct *work)
249 249
250 rc = opal_get_elog_size(&id, &size, &type); 250 rc = opal_get_elog_size(&id, &size, &type);
251 if (rc != OPAL_SUCCESS) { 251 if (rc != OPAL_SUCCESS) {
252 pr_err("ELOG: Opal log read failed\n"); 252 pr_err("ELOG: OPAL log info read failed\n");
253 return; 253 return;
254 } 254 }
255 255
@@ -257,7 +257,7 @@ static void elog_work_fn(struct work_struct *work)
257 log_id = be64_to_cpu(id); 257 log_id = be64_to_cpu(id);
258 elog_type = be64_to_cpu(type); 258 elog_type = be64_to_cpu(type);
259 259
260 BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE); 260 WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
261 261
262 if (elog_size >= OPAL_MAX_ERRLOG_SIZE) 262 if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
263 elog_size = OPAL_MAX_ERRLOG_SIZE; 263 elog_size = OPAL_MAX_ERRLOG_SIZE;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 022b38e6a80b..2d0b4d68a40a 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
86 } 86 }
87 87
88 of_node_set_flag(dn, OF_DYNAMIC); 88 of_node_set_flag(dn, OF_DYNAMIC);
89 of_node_init(dn);
89 90
90 return dn; 91 return dn;
91} 92}
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 0435bb65d0aa..1c0a60d98867 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
69 69
70 np->properties = proplist; 70 np->properties = proplist;
71 of_node_set_flag(np, OF_DYNAMIC); 71 of_node_set_flag(np, OF_DYNAMIC);
72 of_node_init(np);
72 73
73 np->parent = derive_parent(path); 74 np->parent = derive_parent(path);
74 if (IS_ERR(np->parent)) { 75 if (IS_ERR(np->parent)) {
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index df38c70cd59e..18ea9e3f8142 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -51,8 +51,8 @@ static inline int restore_fp_ctl(u32 *fpc)
51 return 0; 51 return 0;
52 52
53 asm volatile( 53 asm volatile(
54 "0: lfpc %1\n" 54 " lfpc %1\n"
55 " la %0,0\n" 55 "0: la %0,0\n"
56 "1:\n" 56 "1:\n"
57 EX_TABLE(0b,1b) 57 EX_TABLE(0b,1b)
58 : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL)); 58 : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 7ba7d6784510..e88d35d74950 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -437,11 +437,11 @@ ENTRY(startup_kdump)
437 437
438#if defined(CONFIG_64BIT) 438#if defined(CONFIG_64BIT)
439#if defined(CONFIG_MARCH_ZEC12) 439#if defined(CONFIG_MARCH_ZEC12)
440 .long 3, 0xc100efea, 0xf46ce800, 0x00400000 440 .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
441#elif defined(CONFIG_MARCH_Z196) 441#elif defined(CONFIG_MARCH_Z196)
442 .long 2, 0xc100efea, 0xf46c0000 442 .long 2, 0xc100eff2, 0xf46c0000
443#elif defined(CONFIG_MARCH_Z10) 443#elif defined(CONFIG_MARCH_Z10)
444 .long 2, 0xc100efea, 0xf0680000 444 .long 2, 0xc100eff2, 0xf0680000
445#elif defined(CONFIG_MARCH_Z9_109) 445#elif defined(CONFIG_MARCH_Z9_109)
446 .long 1, 0xc100efc2 446 .long 1, 0xc100efc2
447#elif defined(CONFIG_MARCH_Z990) 447#elif defined(CONFIG_MARCH_Z990)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 2d716734b5b1..5dc7ad9e2fbf 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -334,9 +334,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
334 unsigned long mask = PSW_MASK_USER; 334 unsigned long mask = PSW_MASK_USER;
335 335
336 mask |= is_ri_task(child) ? PSW_MASK_RI : 0; 336 mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
337 if ((data & ~mask) != PSW_USER_BITS) 337 if ((data ^ PSW_USER_BITS) & ~mask)
338 /* Invalid psw mask. */
339 return -EINVAL;
340 if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
341 /* Invalid address-space-control bits */
338 return -EINVAL; 342 return -EINVAL;
339 if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) 343 if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
344 /* Invalid addressing mode bits */
340 return -EINVAL; 345 return -EINVAL;
341 } 346 }
342 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; 347 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
@@ -672,9 +677,12 @@ static int __poke_user_compat(struct task_struct *child,
672 677
673 mask |= is_ri_task(child) ? PSW32_MASK_RI : 0; 678 mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
674 /* Build a 64 bit psw mask from 31 bit mask. */ 679 /* Build a 64 bit psw mask from 31 bit mask. */
675 if ((tmp & ~mask) != PSW32_USER_BITS) 680 if ((tmp ^ PSW32_USER_BITS) & ~mask)
676 /* Invalid psw mask. */ 681 /* Invalid psw mask. */
677 return -EINVAL; 682 return -EINVAL;
683 if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
684 /* Invalid address-space-control bits */
685 return -EINVAL;
678 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 686 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
679 (regs->psw.mask & PSW_MASK_BA) | 687 (regs->psw.mask & PSW_MASK_BA) |
680 (__u64)(tmp & mask) << 32; 688 (__u64)(tmp & mask) << 32;
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 9ddc51eeb8d6..30de42730b2f 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -48,13 +48,10 @@
48static LIST_HEAD(zpci_list); 48static LIST_HEAD(zpci_list);
49static DEFINE_SPINLOCK(zpci_list_lock); 49static DEFINE_SPINLOCK(zpci_list_lock);
50 50
51static void zpci_enable_irq(struct irq_data *data);
52static void zpci_disable_irq(struct irq_data *data);
53
54static struct irq_chip zpci_irq_chip = { 51static struct irq_chip zpci_irq_chip = {
55 .name = "zPCI", 52 .name = "zPCI",
56 .irq_unmask = zpci_enable_irq, 53 .irq_unmask = unmask_msi_irq,
57 .irq_mask = zpci_disable_irq, 54 .irq_mask = mask_msi_irq,
58}; 55};
59 56
60static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); 57static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@@ -244,43 +241,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
244 return rc; 241 return rc;
245} 242}
246 243
247static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
248{
249 int offset, pos;
250 u32 mask_bits;
251
252 if (msi->msi_attrib.is_msix) {
253 offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
254 PCI_MSIX_ENTRY_VECTOR_CTRL;
255 msi->masked = readl(msi->mask_base + offset);
256 writel(flag, msi->mask_base + offset);
257 } else if (msi->msi_attrib.maskbit) {
258 pos = (long) msi->mask_base;
259 pci_read_config_dword(msi->dev, pos, &mask_bits);
260 mask_bits &= ~(mask);
261 mask_bits |= flag & mask;
262 pci_write_config_dword(msi->dev, pos, mask_bits);
263 } else
264 return 0;
265
266 msi->msi_attrib.maskbit = !!flag;
267 return 1;
268}
269
270static void zpci_enable_irq(struct irq_data *data)
271{
272 struct msi_desc *msi = irq_get_msi_desc(data->irq);
273
274 zpci_msi_set_mask_bits(msi, 1, 0);
275}
276
277static void zpci_disable_irq(struct irq_data *data)
278{
279 struct msi_desc *msi = irq_get_msi_desc(data->irq);
280
281 zpci_msi_set_mask_bits(msi, 1, 1);
282}
283
284void pcibios_fixup_bus(struct pci_bus *bus) 244void pcibios_fixup_bus(struct pci_bus *bus)
285{ 245{
286} 246}
@@ -487,7 +447,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
487 447
488 /* Release MSI interrupts */ 448 /* Release MSI interrupts */
489 list_for_each_entry(msi, &pdev->msi_list, list) { 449 list_for_each_entry(msi, &pdev->msi_list, list) {
490 zpci_msi_set_mask_bits(msi, 1, 1); 450 if (msi->msi_attrib.is_msix)
451 default_msix_mask_irq(msi, 1);
452 else
453 default_msi_mask_irq(msi, 1, 1);
491 irq_set_msi_desc(msi->irq, NULL); 454 irq_set_msi_desc(msi->irq, NULL);
492 irq_free_desc(msi->irq); 455 irq_free_desc(msi->irq);
493 msi->msg.address_lo = 0; 456 msi->msg.address_lo = 0;
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index d4d16e4be07c..bf5b3f5f4962 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -32,7 +32,8 @@ endif
32 32
33cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,) 33cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
34cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \ 34cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \
35 $(call cc-option,-m2a-nofpu,) 35 $(call cc-option,-m2a-nofpu,) \
36 $(call cc-option,-m4-nofpu,)
36cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,) 37cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,)
37cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \ 38cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \
38 $(call cc-option,-mno-implicit-fp,-m4-nofpu) 39 $(call cc-option,-mno-implicit-fp,-m4-nofpu)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 29f2e988c56a..407c87d9879a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -78,6 +78,7 @@ config SPARC64
78 select HAVE_C_RECORDMCOUNT 78 select HAVE_C_RECORDMCOUNT
79 select NO_BOOTMEM 79 select NO_BOOTMEM
80 select HAVE_ARCH_AUDITSYSCALL 80 select HAVE_ARCH_AUDITSYSCALL
81 select ARCH_SUPPORTS_ATOMIC_RMW
81 82
82config ARCH_DEFCONFIG 83config ARCH_DEFCONFIG
83 string 84 string
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index b73274fb961a..42f2bca1d338 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -410,8 +410,9 @@
410#define __NR_finit_module 342 410#define __NR_finit_module 342
411#define __NR_sched_setattr 343 411#define __NR_sched_setattr 343
412#define __NR_sched_getattr 344 412#define __NR_sched_getattr 344
413#define __NR_renameat2 345
413 414
414#define NR_syscalls 345 415#define NR_syscalls 346
415 416
416/* Bitmask values returned from kern_features system call. */ 417/* Bitmask values returned from kern_features system call. */
417#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 418#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index d066eb18650c..f834224208ed 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1)
48SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0) 48SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
49SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0) 49SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
50SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0) 50SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
51SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)
51 52
52 .globl sys32_mmap2 53 .globl sys32_mmap2
53sys32_mmap2: 54sys32_mmap2:
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 151ace8766cc..85fe9b1087cd 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -86,3 +86,4 @@ sys_call_table:
86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
88/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 88/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
89/*345*/ .long sys_renameat2
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 4bd4e2bb26cf..33ecba2826ea 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -87,6 +87,7 @@ sys_call_table32:
87/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime 87/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev 88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
90 .word sys32_renameat2
90 91
91#endif /* CONFIG_COMPAT */ 92#endif /* CONFIG_COMPAT */
92 93
@@ -165,3 +166,4 @@ sys_call_table:
165/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 166/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
166 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 167 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
167/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 168/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
169 .word sys_renameat2
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 9472079471bb..f1b3eb14b855 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -12,6 +12,7 @@
12#include <mem_user.h> 12#include <mem_user.h>
13#include <os.h> 13#include <os.h>
14#include <skas.h> 14#include <skas.h>
15#include <kern_util.h>
15 16
16struct host_vm_change { 17struct host_vm_change {
17 struct host_vm_op { 18 struct host_vm_op {
@@ -124,6 +125,9 @@ static int add_munmap(unsigned long addr, unsigned long len,
124 struct host_vm_op *last; 125 struct host_vm_op *last;
125 int ret = 0; 126 int ret = 0;
126 127
128 if ((addr >= STUB_START) && (addr < STUB_END))
129 return -EINVAL;
130
127 if (hvc->index != 0) { 131 if (hvc->index != 0) {
128 last = &hvc->ops[hvc->index - 1]; 132 last = &hvc->ops[hvc->index - 1];
129 if ((last->type == MUNMAP) && 133 if ((last->type == MUNMAP) &&
@@ -283,8 +287,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
283 /* This is not an else because ret is modified above */ 287 /* This is not an else because ret is modified above */
284 if (ret) { 288 if (ret) {
285 printk(KERN_ERR "fix_range_common: failed, killing current " 289 printk(KERN_ERR "fix_range_common: failed, killing current "
286 "process\n"); 290 "process: %d\n", task_tgid_vnr(current));
291 /* We are under mmap_sem, release it such that current can terminate */
292 up_write(&current->mm->mmap_sem);
287 force_sig(SIGKILL, current); 293 force_sig(SIGKILL, current);
294 do_signal();
288 } 295 }
289} 296}
290 297
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 974b87474a99..5678c3571e7c 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -206,7 +206,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
206 int is_write = FAULT_WRITE(fi); 206 int is_write = FAULT_WRITE(fi);
207 unsigned long address = FAULT_ADDRESS(fi); 207 unsigned long address = FAULT_ADDRESS(fi);
208 208
209 if (regs) 209 if (!is_user && regs)
210 current->thread.segv_regs = container_of(regs, struct pt_regs, regs); 210 current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
211 211
212 if (!is_user && (address >= start_vm) && (address < end_vm)) { 212 if (!is_user && (address >= start_vm) && (address < end_vm)) {
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index d531879a4617..908579f2b0ab 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -54,7 +54,7 @@ static int ptrace_dump_regs(int pid)
54 54
55void wait_stub_done(int pid) 55void wait_stub_done(int pid)
56{ 56{
57 int n, status, err, bad_stop = 0; 57 int n, status, err;
58 58
59 while (1) { 59 while (1) {
60 CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL)); 60 CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
@@ -74,8 +74,6 @@ void wait_stub_done(int pid)
74 74
75 if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0) 75 if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
76 return; 76 return;
77 else
78 bad_stop = 1;
79 77
80bad_wait: 78bad_wait:
81 err = ptrace_dump_regs(pid); 79 err = ptrace_dump_regs(pid);
@@ -85,10 +83,7 @@ bad_wait:
85 printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, " 83 printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
86 "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno, 84 "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
87 status); 85 status);
88 if (bad_stop) 86 fatal_sigsegv();
89 kill(pid, SIGKILL);
90 else
91 fatal_sigsegv();
92} 87}
93 88
94extern unsigned long current_stub_stack(void); 89extern unsigned long current_stub_stack(void);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a8f749ef0fdc..d24887b645dc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -131,6 +131,7 @@ config X86
131 select HAVE_CC_STACKPROTECTOR 131 select HAVE_CC_STACKPROTECTOR
132 select GENERIC_CPU_AUTOPROBE 132 select GENERIC_CPU_AUTOPROBE
133 select HAVE_ARCH_AUDITSYSCALL 133 select HAVE_ARCH_AUDITSYSCALL
134 select ARCH_SUPPORTS_ATOMIC_RMW
134 135
135config INSTRUCTION_DECODER 136config INSTRUCTION_DECODER
136 def_bool y 137 def_bool y
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 84c223479e3c..7a6d43a554d7 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -91,10 +91,9 @@ bs_die:
91 91
92 .section ".bsdata", "a" 92 .section ".bsdata", "a"
93bugger_off_msg: 93bugger_off_msg:
94 .ascii "Direct floppy boot is not supported. " 94 .ascii "Use a boot loader.\r\n"
95 .ascii "Use a boot loader program instead.\r\n"
96 .ascii "\n" 95 .ascii "\n"
97 .ascii "Remove disk and press any key to reboot ...\r\n" 96 .ascii "Remove disk and press any key to reboot...\r\n"
98 .byte 0 97 .byte 0
99 98
100#ifdef CONFIG_EFI_STUB 99#ifdef CONFIG_EFI_STUB
@@ -108,7 +107,7 @@ coff_header:
108#else 107#else
109 .word 0x8664 # x86-64 108 .word 0x8664 # x86-64
110#endif 109#endif
111 .word 3 # nr_sections 110 .word 4 # nr_sections
112 .long 0 # TimeDateStamp 111 .long 0 # TimeDateStamp
113 .long 0 # PointerToSymbolTable 112 .long 0 # PointerToSymbolTable
114 .long 1 # NumberOfSymbols 113 .long 1 # NumberOfSymbols
@@ -250,6 +249,25 @@ section_table:
250 .word 0 # NumberOfLineNumbers 249 .word 0 # NumberOfLineNumbers
251 .long 0x60500020 # Characteristics (section flags) 250 .long 0x60500020 # Characteristics (section flags)
252 251
252 #
253 # The offset & size fields are filled in by build.c.
254 #
255 .ascii ".bss"
256 .byte 0
257 .byte 0
258 .byte 0
259 .byte 0
260 .long 0
261 .long 0x0
262 .long 0 # Size of initialized data
263 # on disk
264 .long 0x0
265 .long 0 # PointerToRelocations
266 .long 0 # PointerToLineNumbers
267 .word 0 # NumberOfRelocations
268 .word 0 # NumberOfLineNumbers
269 .long 0xc8000080 # Characteristics (section flags)
270
253#endif /* CONFIG_EFI_STUB */ 271#endif /* CONFIG_EFI_STUB */
254 272
255 # Kernel attributes; used by setup. This is part 1 of the 273 # Kernel attributes; used by setup. This is part 1 of the
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 1a2f2121cada..a7661c430cd9 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -143,7 +143,7 @@ static void usage(void)
143 143
144#ifdef CONFIG_EFI_STUB 144#ifdef CONFIG_EFI_STUB
145 145
146static void update_pecoff_section_header(char *section_name, u32 offset, u32 size) 146static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
147{ 147{
148 unsigned int pe_header; 148 unsigned int pe_header;
149 unsigned short num_sections; 149 unsigned short num_sections;
@@ -164,10 +164,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
164 put_unaligned_le32(size, section + 0x8); 164 put_unaligned_le32(size, section + 0x8);
165 165
166 /* section header vma field */ 166 /* section header vma field */
167 put_unaligned_le32(offset, section + 0xc); 167 put_unaligned_le32(vma, section + 0xc);
168 168
169 /* section header 'size of initialised data' field */ 169 /* section header 'size of initialised data' field */
170 put_unaligned_le32(size, section + 0x10); 170 put_unaligned_le32(datasz, section + 0x10);
171 171
172 /* section header 'file offset' field */ 172 /* section header 'file offset' field */
173 put_unaligned_le32(offset, section + 0x14); 173 put_unaligned_le32(offset, section + 0x14);
@@ -179,6 +179,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
179 } 179 }
180} 180}
181 181
182static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
183{
184 update_pecoff_section_header_fields(section_name, offset, size, size, offset);
185}
186
182static void update_pecoff_setup_and_reloc(unsigned int size) 187static void update_pecoff_setup_and_reloc(unsigned int size)
183{ 188{
184 u32 setup_offset = 0x200; 189 u32 setup_offset = 0x200;
@@ -203,9 +208,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
203 208
204 pe_header = get_unaligned_le32(&buf[0x3c]); 209 pe_header = get_unaligned_le32(&buf[0x3c]);
205 210
206 /* Size of image */
207 put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
208
209 /* 211 /*
210 * Size of code: Subtract the size of the first sector (512 bytes) 212 * Size of code: Subtract the size of the first sector (512 bytes)
211 * which includes the header. 213 * which includes the header.
@@ -220,6 +222,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
220 update_pecoff_section_header(".text", text_start, text_sz); 222 update_pecoff_section_header(".text", text_start, text_sz);
221} 223}
222 224
225static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
226{
227 unsigned int pe_header;
228 unsigned int bss_sz = init_sz - file_sz;
229
230 pe_header = get_unaligned_le32(&buf[0x3c]);
231
232 /* Size of uninitialized data */
233 put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
234
235 /* Size of image */
236 put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
237
238 update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
239}
240
223static int reserve_pecoff_reloc_section(int c) 241static int reserve_pecoff_reloc_section(int c)
224{ 242{
225 /* Reserve 0x20 bytes for .reloc section */ 243 /* Reserve 0x20 bytes for .reloc section */
@@ -259,6 +277,8 @@ static void efi_stub_entry_update(void)
259static inline void update_pecoff_setup_and_reloc(unsigned int size) {} 277static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
260static inline void update_pecoff_text(unsigned int text_start, 278static inline void update_pecoff_text(unsigned int text_start,
261 unsigned int file_sz) {} 279 unsigned int file_sz) {}
280static inline void update_pecoff_bss(unsigned int file_sz,
281 unsigned int init_sz) {}
262static inline void efi_stub_defaults(void) {} 282static inline void efi_stub_defaults(void) {}
263static inline void efi_stub_entry_update(void) {} 283static inline void efi_stub_entry_update(void) {}
264 284
@@ -310,7 +330,7 @@ static void parse_zoffset(char *fname)
310 330
311int main(int argc, char ** argv) 331int main(int argc, char ** argv)
312{ 332{
313 unsigned int i, sz, setup_sectors; 333 unsigned int i, sz, setup_sectors, init_sz;
314 int c; 334 int c;
315 u32 sys_size; 335 u32 sys_size;
316 struct stat sb; 336 struct stat sb;
@@ -376,7 +396,9 @@ int main(int argc, char ** argv)
376 buf[0x1f1] = setup_sectors-1; 396 buf[0x1f1] = setup_sectors-1;
377 put_unaligned_le32(sys_size, &buf[0x1f4]); 397 put_unaligned_le32(sys_size, &buf[0x1f4]);
378 398
379 update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); 399 update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
400 init_sz = get_unaligned_le32(&buf[0x260]);
401 update_pecoff_bss(i + (sys_size * 16), init_sz);
380 402
381 efi_stub_entry_update(); 403 efi_stub_entry_update();
382 404
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index bba3cf88e624..0a8b519226b8 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -129,7 +129,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
129 129
130#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */ 130#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */
131 131
132#define INTERRUPT_RETURN iretq 132#define INTERRUPT_RETURN jmp native_iret
133#define USERGS_SYSRET64 \ 133#define USERGS_SYSRET64 \
134 swapgs; \ 134 swapgs; \
135 sysretq; 135 sysretq;
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index f3a1f04ed4cb..584874451414 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -841,7 +841,6 @@ static int apm_do_idle(void)
841 u32 eax; 841 u32 eax;
842 u8 ret = 0; 842 u8 ret = 0;
843 int idled = 0; 843 int idled = 0;
844 int polling;
845 int err = 0; 844 int err = 0;
846 845
847 if (!need_resched()) { 846 if (!need_resched()) {
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index a80029035bf2..f9e4fdd3b877 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
370 */ 370 */
371 detect_extended_topology(c); 371 detect_extended_topology(c);
372 372
373 if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
374 /*
375 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
376 * detection.
377 */
378 c->x86_max_cores = intel_num_cpu_cores(c);
379#ifdef CONFIG_X86_32
380 detect_ht(c);
381#endif
382 }
383
373 l2 = init_intel_cacheinfo(c); 384 l2 = init_intel_cacheinfo(c);
374 if (c->cpuid_level > 9) { 385 if (c->cpuid_level > 9) {
375 unsigned eax = cpuid_eax(10); 386 unsigned eax = cpuid_eax(10);
@@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
438 set_cpu_cap(c, X86_FEATURE_P3); 449 set_cpu_cap(c, X86_FEATURE_P3);
439#endif 450#endif
440 451
441 if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
442 /*
443 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
444 * detection.
445 */
446 c->x86_max_cores = intel_num_cpu_cores(c);
447#ifdef CONFIG_X86_32
448 detect_ht(c);
449#endif
450 }
451
452 /* Work around errata */ 452 /* Work around errata */
453 srat_detect_node(c); 453 srat_detect_node(c);
454 454
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index a952e9c85b6f..9c8f7394c612 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
730#endif 730#endif
731 } 731 }
732 732
733#ifdef CONFIG_X86_HT
734 /*
735 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
736 * turns means that the only possibility is SMT (as indicated in
737 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
738 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
739 * c->phys_proc_id.
740 */
741 if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
742 per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
743#endif
744
733 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); 745 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
734 746
735 return l2; 747 return l2;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bb92f38153b2..9a79c8dbd8e8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void)
2451 for_each_online_cpu(i) { 2451 for_each_online_cpu(i) {
2452 err = mce_device_create(i); 2452 err = mce_device_create(i);
2453 if (err) { 2453 if (err) {
2454 /*
2455 * Register notifier anyway (and do not unreg it) so
2456 * that we don't leave undeleted timers, see notifier
2457 * callback above.
2458 */
2459 __register_hotcpu_notifier(&mce_cpu_notifier);
2454 cpu_notifier_register_done(); 2460 cpu_notifier_register_done();
2455 goto err_device_create; 2461 goto err_device_create;
2456 } 2462 }
@@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void)
2471err_register: 2477err_register:
2472 unregister_syscore_ops(&mce_syscore_ops); 2478 unregister_syscore_ops(&mce_syscore_ops);
2473 2479
2474 cpu_notifier_register_begin();
2475 __unregister_hotcpu_notifier(&mce_cpu_notifier);
2476 cpu_notifier_register_done();
2477
2478err_device_create: 2480err_device_create:
2479 /* 2481 /*
2480 * We didn't keep track of which devices were created above, but 2482 * We didn't keep track of which devices were created above, but
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2bdfbff8a4f6..2879ecdaac43 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
118 continue; 118 continue;
119 if (event->attr.config1 & ~er->valid_mask) 119 if (event->attr.config1 & ~er->valid_mask)
120 return -EINVAL; 120 return -EINVAL;
121 /* Check if the extra msrs can be safely accessed*/
122 if (!er->extra_msr_access)
123 return -ENXIO;
121 124
122 reg->idx = er->idx; 125 reg->idx = er->idx;
123 reg->config = event->attr.config1; 126 reg->config = event->attr.config1;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 3b2f9bdd974b..8ade93111e03 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -295,14 +295,16 @@ struct extra_reg {
295 u64 config_mask; 295 u64 config_mask;
296 u64 valid_mask; 296 u64 valid_mask;
297 int idx; /* per_xxx->regs[] reg index */ 297 int idx; /* per_xxx->regs[] reg index */
298 bool extra_msr_access;
298}; 299};
299 300
300#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \ 301#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
301 .event = (e), \ 302 .event = (e), \
302 .msr = (ms), \ 303 .msr = (ms), \
303 .config_mask = (m), \ 304 .config_mask = (m), \
304 .valid_mask = (vm), \ 305 .valid_mask = (vm), \
305 .idx = EXTRA_REG_##i, \ 306 .idx = EXTRA_REG_##i, \
307 .extra_msr_access = true, \
306 } 308 }
307 309
308#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ 310#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index adb02aa62af5..2502d0d9d246 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1382,6 +1382,15 @@ again:
1382 intel_pmu_lbr_read(); 1382 intel_pmu_lbr_read();
1383 1383
1384 /* 1384 /*
1385 * CondChgd bit 63 doesn't mean any overflow status. Ignore
1386 * and clear the bit.
1387 */
1388 if (__test_and_clear_bit(63, (unsigned long *)&status)) {
1389 if (!status)
1390 goto done;
1391 }
1392
1393 /*
1385 * PEBS overflow sets bit 62 in the global status register 1394 * PEBS overflow sets bit 62 in the global status register
1386 */ 1395 */
1387 if (__test_and_clear_bit(62, (unsigned long *)&status)) { 1396 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
@@ -2173,6 +2182,41 @@ static void intel_snb_check_microcode(void)
2173 } 2182 }
2174} 2183}
2175 2184
2185/*
2186 * Under certain circumstances, access certain MSR may cause #GP.
2187 * The function tests if the input MSR can be safely accessed.
2188 */
2189static bool check_msr(unsigned long msr, u64 mask)
2190{
2191 u64 val_old, val_new, val_tmp;
2192
2193 /*
2194 * Read the current value, change it and read it back to see if it
2195 * matches, this is needed to detect certain hardware emulators
2196 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
2197 */
2198 if (rdmsrl_safe(msr, &val_old))
2199 return false;
2200
2201 /*
2202 * Only change the bits which can be updated by wrmsrl.
2203 */
2204 val_tmp = val_old ^ mask;
2205 if (wrmsrl_safe(msr, val_tmp) ||
2206 rdmsrl_safe(msr, &val_new))
2207 return false;
2208
2209 if (val_new != val_tmp)
2210 return false;
2211
2212 /* Here it's sure that the MSR can be safely accessed.
2213 * Restore the old value and return.
2214 */
2215 wrmsrl(msr, val_old);
2216
2217 return true;
2218}
2219
2176static __init void intel_sandybridge_quirk(void) 2220static __init void intel_sandybridge_quirk(void)
2177{ 2221{
2178 x86_pmu.check_microcode = intel_snb_check_microcode; 2222 x86_pmu.check_microcode = intel_snb_check_microcode;
@@ -2262,7 +2306,8 @@ __init int intel_pmu_init(void)
2262 union cpuid10_ebx ebx; 2306 union cpuid10_ebx ebx;
2263 struct event_constraint *c; 2307 struct event_constraint *c;
2264 unsigned int unused; 2308 unsigned int unused;
2265 int version; 2309 struct extra_reg *er;
2310 int version, i;
2266 2311
2267 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { 2312 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2268 switch (boot_cpu_data.x86) { 2313 switch (boot_cpu_data.x86) {
@@ -2465,6 +2510,9 @@ __init int intel_pmu_init(void)
2465 case 62: /* IvyBridge EP */ 2510 case 62: /* IvyBridge EP */
2466 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 2511 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2467 sizeof(hw_cache_event_ids)); 2512 sizeof(hw_cache_event_ids));
2513 /* dTLB-load-misses on IVB is different than SNB */
2514 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
2515
2468 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 2516 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2469 sizeof(hw_cache_extra_regs)); 2517 sizeof(hw_cache_extra_regs));
2470 2518
@@ -2565,6 +2613,34 @@ __init int intel_pmu_init(void)
2565 } 2613 }
2566 } 2614 }
2567 2615
2616 /*
2617 * Access LBR MSR may cause #GP under certain circumstances.
2618 * E.g. KVM doesn't support LBR MSR
2619 * Check all LBT MSR here.
2620 * Disable LBR access if any LBR MSRs can not be accessed.
2621 */
2622 if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
2623 x86_pmu.lbr_nr = 0;
2624 for (i = 0; i < x86_pmu.lbr_nr; i++) {
2625 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
2626 check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
2627 x86_pmu.lbr_nr = 0;
2628 }
2629
2630 /*
2631 * Access extra MSR may cause #GP under certain circumstances.
2632 * E.g. KVM doesn't support offcore event
2633 * Check all extra_regs here.
2634 */
2635 if (x86_pmu.extra_regs) {
2636 for (er = x86_pmu.extra_regs; er->msr; er++) {
2637 er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
2638 /* Disable LBR select mapping */
2639 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
2640 x86_pmu.lbr_sel_map = NULL;
2641 }
2642 }
2643
2568 /* Support full width counters using alternative MSR range */ 2644 /* Support full width counters using alternative MSR range */
2569 if (x86_pmu.intel_cap.full_width_write) { 2645 if (x86_pmu.intel_cap.full_width_write) {
2570 x86_pmu.max_period = x86_pmu.cntval_mask; 2646 x86_pmu.max_period = x86_pmu.cntval_mask;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 980970cb744d..696ade311ded 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu)
311 if (!x86_pmu.bts) 311 if (!x86_pmu.bts)
312 return 0; 312 return 0;
313 313
314 buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node); 314 buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
315 if (unlikely(!buffer)) 315 if (unlikely(!buffer)) {
316 WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
316 return -ENOMEM; 317 return -ENOMEM;
318 }
317 319
318 max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; 320 max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
319 thresh = max / 16; 321 thresh = max / 16;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 65bbbea38b9c..ae6552a0701f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
550 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), 550 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
551 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), 551 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
552 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), 552 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
553 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc), 553 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
554 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc), 554 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
555 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2), 555 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
556 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2), 556 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
557 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2), 557 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
558 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2), 558 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
559 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8), 559 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
560 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8), 560 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
561 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc), 561 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
562 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc), 562 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
563 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2), 563 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
564 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2), 564 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
565 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2), 565 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1222 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, 1222 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1223 SNBEP_CBO_PMON_CTL_TID_EN, 0x1), 1223 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1224 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), 1224 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1225
1225 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), 1226 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1226 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), 1227 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1227 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), 1228 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1245 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), 1246 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1246 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), 1247 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1247 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), 1248 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1248 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), 1249 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1249 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), 1250 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1250 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), 1251 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1251 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), 1252 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index dbaa23e78b36..0d0c9d4ab6d5 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -425,8 +425,8 @@ sysenter_do_call:
425 cmpl $(NR_syscalls), %eax 425 cmpl $(NR_syscalls), %eax
426 jae sysenter_badsys 426 jae sysenter_badsys
427 call *sys_call_table(,%eax,4) 427 call *sys_call_table(,%eax,4)
428 movl %eax,PT_EAX(%esp)
429sysenter_after_call: 428sysenter_after_call:
429 movl %eax,PT_EAX(%esp)
430 LOCKDEP_SYS_EXIT 430 LOCKDEP_SYS_EXIT
431 DISABLE_INTERRUPTS(CLBR_ANY) 431 DISABLE_INTERRUPTS(CLBR_ANY)
432 TRACE_IRQS_OFF 432 TRACE_IRQS_OFF
@@ -502,6 +502,7 @@ ENTRY(system_call)
502 jae syscall_badsys 502 jae syscall_badsys
503syscall_call: 503syscall_call:
504 call *sys_call_table(,%eax,4) 504 call *sys_call_table(,%eax,4)
505syscall_after_call:
505 movl %eax,PT_EAX(%esp) # store the return value 506 movl %eax,PT_EAX(%esp) # store the return value
506syscall_exit: 507syscall_exit:
507 LOCKDEP_SYS_EXIT 508 LOCKDEP_SYS_EXIT
@@ -675,12 +676,12 @@ syscall_fault:
675END(syscall_fault) 676END(syscall_fault)
676 677
677syscall_badsys: 678syscall_badsys:
678 movl $-ENOSYS,PT_EAX(%esp) 679 movl $-ENOSYS,%eax
679 jmp syscall_exit 680 jmp syscall_after_call
680END(syscall_badsys) 681END(syscall_badsys)
681 682
682sysenter_badsys: 683sysenter_badsys:
683 movl $-ENOSYS,PT_EAX(%esp) 684 movl $-ENOSYS,%eax
684 jmp sysenter_after_call 685 jmp sysenter_after_call
685END(syscall_badsys) 686END(syscall_badsys)
686 CFI_ENDPROC 687 CFI_ENDPROC
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b25ca969edd2..c844f0816ab8 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -830,27 +830,24 @@ restore_args:
830 RESTORE_ARGS 1,8,1 830 RESTORE_ARGS 1,8,1
831 831
832irq_return: 832irq_return:
833 INTERRUPT_RETURN
834
835ENTRY(native_iret)
833 /* 836 /*
834 * Are we returning to a stack segment from the LDT? Note: in 837 * Are we returning to a stack segment from the LDT? Note: in
835 * 64-bit mode SS:RSP on the exception stack is always valid. 838 * 64-bit mode SS:RSP on the exception stack is always valid.
836 */ 839 */
837#ifdef CONFIG_X86_ESPFIX64 840#ifdef CONFIG_X86_ESPFIX64
838 testb $4,(SS-RIP)(%rsp) 841 testb $4,(SS-RIP)(%rsp)
839 jnz irq_return_ldt 842 jnz native_irq_return_ldt
840#endif 843#endif
841 844
842irq_return_iret: 845native_irq_return_iret:
843 INTERRUPT_RETURN
844 _ASM_EXTABLE(irq_return_iret, bad_iret)
845
846#ifdef CONFIG_PARAVIRT
847ENTRY(native_iret)
848 iretq 846 iretq
849 _ASM_EXTABLE(native_iret, bad_iret) 847 _ASM_EXTABLE(native_irq_return_iret, bad_iret)
850#endif
851 848
852#ifdef CONFIG_X86_ESPFIX64 849#ifdef CONFIG_X86_ESPFIX64
853irq_return_ldt: 850native_irq_return_ldt:
854 pushq_cfi %rax 851 pushq_cfi %rax
855 pushq_cfi %rdi 852 pushq_cfi %rdi
856 SWAPGS 853 SWAPGS
@@ -872,7 +869,7 @@ irq_return_ldt:
872 SWAPGS 869 SWAPGS
873 movq %rax,%rsp 870 movq %rax,%rsp
874 popq_cfi %rax 871 popq_cfi %rax
875 jmp irq_return_iret 872 jmp native_irq_return_iret
876#endif 873#endif
877 874
878 .section .fixup,"ax" 875 .section .fixup,"ax"
@@ -956,13 +953,8 @@ __do_double_fault:
956 cmpl $__KERNEL_CS,CS(%rdi) 953 cmpl $__KERNEL_CS,CS(%rdi)
957 jne do_double_fault 954 jne do_double_fault
958 movq RIP(%rdi),%rax 955 movq RIP(%rdi),%rax
959 cmpq $irq_return_iret,%rax 956 cmpq $native_irq_return_iret,%rax
960#ifdef CONFIG_PARAVIRT
961 je 1f
962 cmpq $native_iret,%rax
963#endif
964 jne do_double_fault /* This shouldn't happen... */ 957 jne do_double_fault /* This shouldn't happen... */
9651:
966 movq PER_CPU_VAR(kernel_stack),%rax 958 movq PER_CPU_VAR(kernel_stack),%rax
967 subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */ 959 subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
968 movq %rax,RSP(%rdi) 960 movq %rax,RSP(%rdi)
@@ -1428,7 +1420,7 @@ error_sti:
1428 */ 1420 */
1429error_kernelspace: 1421error_kernelspace:
1430 incl %ebx 1422 incl %ebx
1431 leaq irq_return_iret(%rip),%rcx 1423 leaq native_irq_return_iret(%rip),%rcx
1432 cmpq %rcx,RIP+8(%rsp) 1424 cmpq %rcx,RIP+8(%rsp)
1433 je error_swapgs 1425 je error_swapgs
1434 movl %ecx,%eax /* zero extend */ 1426 movl %ecx,%eax /* zero extend */
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 6afbb16e9b79..94d857fb1033 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -175,7 +175,7 @@ void init_espfix_ap(void)
175 if (!pud_present(pud)) { 175 if (!pud_present(pud)) {
176 pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP); 176 pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
177 pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask)); 177 pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
178 paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT); 178 paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
179 for (n = 0; n < ESPFIX_PUD_CLONES; n++) 179 for (n = 0; n < ESPFIX_PUD_CLONES; n++)
180 set_pud(&pud_p[n], pud); 180 set_pud(&pud_p[n], pud);
181 } 181 }
@@ -185,7 +185,7 @@ void init_espfix_ap(void)
185 if (!pmd_present(pmd)) { 185 if (!pmd_present(pmd)) {
186 pte_p = (pte_t *)__get_free_page(PGALLOC_GFP); 186 pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
187 pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask)); 187 pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
188 paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT); 188 paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
189 for (n = 0; n < ESPFIX_PMD_CLONES; n++) 189 for (n = 0; n < ESPFIX_PMD_CLONES; n++)
190 set_pmd(&pmd_p[n], pmd); 190 set_pmd(&pmd_p[n], pmd);
191 } 191 }
@@ -193,7 +193,6 @@ void init_espfix_ap(void)
193 pte_p = pte_offset_kernel(&pmd, addr); 193 pte_p = pte_offset_kernel(&pmd, addr);
194 stack_page = (void *)__get_free_page(GFP_KERNEL); 194 stack_page = (void *)__get_free_page(GFP_KERNEL);
195 pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask)); 195 pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
196 paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT);
197 for (n = 0; n < ESPFIX_PTE_CLONES; n++) 196 for (n = 0; n < ESPFIX_PTE_CLONES; n++)
198 set_pte(&pte_p[n*PTE_STRIDE], pte); 197 set_pte(&pte_p[n*PTE_STRIDE], pte);
199 198
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 7596df664901..67e6d19ef1be 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
574 struct kprobe *p; 574 struct kprobe *p;
575 struct kprobe_ctlblk *kcb; 575 struct kprobe_ctlblk *kcb;
576 576
577 if (user_mode_vm(regs))
578 return 0;
579
577 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); 580 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
578 /* 581 /*
579 * We don't want to be preempted for the entire 582 * We don't want to be preempted for the entire
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 3f08f34f93eb..a1da6737ba5b 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -6,7 +6,6 @@ DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
6DEF_NATIVE(pv_irq_ops, irq_enable, "sti"); 6DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
7DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq"); 7DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
8DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); 8DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
9DEF_NATIVE(pv_cpu_ops, iret, "iretq");
10DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); 9DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
11DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); 10DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
12DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); 11DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
@@ -50,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
50 PATCH_SITE(pv_irq_ops, save_fl); 49 PATCH_SITE(pv_irq_ops, save_fl);
51 PATCH_SITE(pv_irq_ops, irq_enable); 50 PATCH_SITE(pv_irq_ops, irq_enable);
52 PATCH_SITE(pv_irq_ops, irq_disable); 51 PATCH_SITE(pv_irq_ops, irq_disable);
53 PATCH_SITE(pv_cpu_ops, iret);
54 PATCH_SITE(pv_cpu_ops, irq_enable_sysexit); 52 PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
55 PATCH_SITE(pv_cpu_ops, usergs_sysret32); 53 PATCH_SITE(pv_cpu_ops, usergs_sysret32);
56 PATCH_SITE(pv_cpu_ops, usergs_sysret64); 54 PATCH_SITE(pv_cpu_ops, usergs_sysret64);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 57e5ce126d5a..ea030319b321 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -920,9 +920,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
920 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); 920 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
921 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) 921 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
922 mark_tsc_unstable("cpufreq changes"); 922 mark_tsc_unstable("cpufreq changes");
923 }
924 923
925 set_cyc2ns_scale(tsc_khz, freq->cpu); 924 set_cyc2ns_scale(tsc_khz, freq->cpu);
925 }
926 926
927 return 0; 927 return 0;
928} 928}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f6449334ec45..ef432f891d30 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
5887 kvm_x86_ops->set_nmi(vcpu); 5887 kvm_x86_ops->set_nmi(vcpu);
5888 } 5888 }
5889 } else if (kvm_cpu_has_injectable_intr(vcpu)) { 5889 } else if (kvm_cpu_has_injectable_intr(vcpu)) {
5890 /*
5891 * Because interrupts can be injected asynchronously, we are
5892 * calling check_nested_events again here to avoid a race condition.
5893 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
5894 * proposal and current concerns. Perhaps we should be setting
5895 * KVM_REQ_EVENT only on certain events and not unconditionally?
5896 */
5897 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
5898 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
5899 if (r != 0)
5900 return r;
5901 }
5890 if (kvm_x86_ops->interrupt_allowed(vcpu)) { 5902 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
5891 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), 5903 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
5892 false); 5904 false);
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index c98583588580..ebfa9b2c871d 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -36,99 +36,133 @@
36 36
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/mm.h> 38#include <linux/mm.h>
39#include <linux/slab.h>
39#include <linux/vmalloc.h> 40#include <linux/vmalloc.h>
40 41
41#include <xen/interface/xen.h> 42#include <xen/interface/xen.h>
42#include <xen/page.h> 43#include <xen/page.h>
43#include <xen/grant_table.h> 44#include <xen/grant_table.h>
45#include <xen/xen.h>
44 46
45#include <asm/pgtable.h> 47#include <asm/pgtable.h>
46 48
47static int map_pte_fn(pte_t *pte, struct page *pmd_page, 49static struct gnttab_vm_area {
48 unsigned long addr, void *data) 50 struct vm_struct *area;
51 pte_t **ptes;
52} gnttab_shared_vm_area, gnttab_status_vm_area;
53
54int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
55 unsigned long max_nr_gframes,
56 void **__shared)
49{ 57{
50 unsigned long **frames = (unsigned long **)data; 58 void *shared = *__shared;
59 unsigned long addr;
60 unsigned long i;
51 61
52 set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL)); 62 if (shared == NULL)
53 (*frames)++; 63 *__shared = shared = gnttab_shared_vm_area.area->addr;
54 return 0;
55}
56 64
57/* 65 addr = (unsigned long)shared;
58 * This function is used to map shared frames to store grant status. It is 66
59 * different from map_pte_fn above, the frames type here is uint64_t. 67 for (i = 0; i < nr_gframes; i++) {
60 */ 68 set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],
61static int map_pte_fn_status(pte_t *pte, struct page *pmd_page, 69 mfn_pte(frames[i], PAGE_KERNEL));
62 unsigned long addr, void *data) 70 addr += PAGE_SIZE;
63{ 71 }
64 uint64_t **frames = (uint64_t **)data;
65 72
66 set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
67 (*frames)++;
68 return 0; 73 return 0;
69} 74}
70 75
71static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, 76int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
72 unsigned long addr, void *data) 77 unsigned long max_nr_gframes,
78 grant_status_t **__shared)
73{ 79{
80 grant_status_t *shared = *__shared;
81 unsigned long addr;
82 unsigned long i;
83
84 if (shared == NULL)
85 *__shared = shared = gnttab_status_vm_area.area->addr;
86
87 addr = (unsigned long)shared;
88
89 for (i = 0; i < nr_gframes; i++) {
90 set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],
91 mfn_pte(frames[i], PAGE_KERNEL));
92 addr += PAGE_SIZE;
93 }
74 94
75 set_pte_at(&init_mm, addr, pte, __pte(0));
76 return 0; 95 return 0;
77} 96}
78 97
79int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, 98void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
80 unsigned long max_nr_gframes,
81 void **__shared)
82{ 99{
83 int rc; 100 pte_t **ptes;
84 void *shared = *__shared; 101 unsigned long addr;
102 unsigned long i;
85 103
86 if (shared == NULL) { 104 if (shared == gnttab_status_vm_area.area->addr)
87 struct vm_struct *area = 105 ptes = gnttab_status_vm_area.ptes;
88 alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL); 106 else
89 BUG_ON(area == NULL); 107 ptes = gnttab_shared_vm_area.ptes;
90 shared = area->addr;
91 *__shared = shared;
92 }
93 108
94 rc = apply_to_page_range(&init_mm, (unsigned long)shared, 109 addr = (unsigned long)shared;
95 PAGE_SIZE * nr_gframes, 110
96 map_pte_fn, &frames); 111 for (i = 0; i < nr_gframes; i++) {
97 return rc; 112 set_pte_at(&init_mm, addr, ptes[i], __pte(0));
113 addr += PAGE_SIZE;
114 }
98} 115}
99 116
100int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes, 117static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
101 unsigned long max_nr_gframes,
102 grant_status_t **__shared)
103{ 118{
104 int rc; 119 area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
105 grant_status_t *shared = *__shared; 120 if (area->ptes == NULL)
121 return -ENOMEM;
106 122
107 if (shared == NULL) { 123 area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes);
108 /* No need to pass in PTE as we are going to do it 124 if (area->area == NULL) {
109 * in apply_to_page_range anyhow. */ 125 kfree(area->ptes);
110 struct vm_struct *area = 126 return -ENOMEM;
111 alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
112 BUG_ON(area == NULL);
113 shared = area->addr;
114 *__shared = shared;
115 } 127 }
116 128
117 rc = apply_to_page_range(&init_mm, (unsigned long)shared, 129 return 0;
118 PAGE_SIZE * nr_gframes,
119 map_pte_fn_status, &frames);
120 return rc;
121} 130}
122 131
123void arch_gnttab_unmap(void *shared, unsigned long nr_gframes) 132static void arch_gnttab_vfree(struct gnttab_vm_area *area)
133{
134 free_vm_area(area->area);
135 kfree(area->ptes);
136}
137
138int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
124{ 139{
125 apply_to_page_range(&init_mm, (unsigned long)shared, 140 int ret;
126 PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL); 141
142 if (!xen_pv_domain())
143 return 0;
144
145 ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
146 if (ret < 0)
147 return ret;
148
149 /*
150 * Always allocate the space for the status frames in case
151 * we're migrated to a host with V2 support.
152 */
153 ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status);
154 if (ret < 0)
155 goto err;
156
157 return 0;
158 err:
159 arch_gnttab_vfree(&gnttab_shared_vm_area);
160 return -ENOMEM;
127} 161}
162
128#ifdef CONFIG_XEN_PVH 163#ifdef CONFIG_XEN_PVH
129#include <xen/balloon.h> 164#include <xen/balloon.h>
130#include <xen/events.h> 165#include <xen/events.h>
131#include <xen/xen.h>
132#include <linux/slab.h> 166#include <linux/slab.h>
133static int __init xlated_setup_gnttab_pages(void) 167static int __init xlated_setup_gnttab_pages(void)
134{ 168{
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index f9e1ec346e35..8453e6e39895 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
376 beqz a2, 1f # if at start of vector, don't restore 376 beqz a2, 1f # if at start of vector, don't restore
377 377
378 addi a0, a0, -128 378 addi a0, a0, -128
379 bbsi a0, 8, 1f # don't restore except for overflow 8 and 12 379 bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
380 bbsi a0, 7, 2f 380
381 /*
382 * This fixup handler is for the extremely unlikely case where the
383 * overflow handler's reference thru a0 gets a hardware TLB refill
384 * that bumps out the (distinct, aliasing) TLB entry that mapped its
385 * prior references thru a9/a13, and where our reference now thru
386 * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
387 */
388 movi a2, window_overflow_restore_a0_fixup
389 s32i a2, a3, EXC_TABLE_FIXUP
390 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
391 xsr a3, excsave1
392
393 bbsi.l a0, 7, 2f
381 394
382 /* 395 /*
383 * Restore a0 as saved by _WindowOverflow8(). 396 * Restore a0 as saved by _WindowOverflow8().
384 *
385 * FIXME: we really need a fixup handler for this L32E,
386 * for the extremely unlikely case where the overflow handler's
387 * reference thru a0 gets a hardware TLB refill that bumps out
388 * the (distinct, aliasing) TLB entry that mapped its prior
389 * references thru a9, and where our reference now thru a9
390 * gets a 2nd-level miss exception (not hardware TLB refill).
391 */ 397 */
392 398
393 l32e a2, a9, -16 399 l32e a0, a9, -16
394 wsr a2, depc # replace the saved a0 400 wsr a0, depc # replace the saved a0
395 j 1f 401 j 3f
396 402
3972: 4032:
398 /* 404 /*
399 * Restore a0 as saved by _WindowOverflow12(). 405 * Restore a0 as saved by _WindowOverflow12().
400 *
401 * FIXME: we really need a fixup handler for this L32E,
402 * for the extremely unlikely case where the overflow handler's
403 * reference thru a0 gets a hardware TLB refill that bumps out
404 * the (distinct, aliasing) TLB entry that mapped its prior
405 * references thru a13, and where our reference now thru a13
406 * gets a 2nd-level miss exception (not hardware TLB refill).
407 */ 406 */
408 407
409 l32e a2, a13, -16 408 l32e a0, a13, -16
410 wsr a2, depc # replace the saved a0 409 wsr a0, depc # replace the saved a0
4103:
411 xsr a3, excsave1
412 movi a0, 0
413 s32i a0, a3, EXC_TABLE_FIXUP
414 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
4111: 4151:
412 /* 416 /*
413 * Restore WindowBase while leaving all address registers restored. 417 * Restore WindowBase while leaving all address registers restored.
@@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
449 453
450 s32i a0, a2, PT_DEPC 454 s32i a0, a2, PT_DEPC
451 455
456_DoubleExceptionVector_handle_exception:
452 addx4 a0, a0, a3 457 addx4 a0, a0, a3
453 l32i a0, a0, EXC_TABLE_FAST_USER 458 l32i a0, a0, EXC_TABLE_FAST_USER
454 xsr a3, excsave1 459 xsr a3, excsave1
@@ -464,11 +469,120 @@ _DoubleExceptionVector_WindowOverflow:
464 rotw -3 469 rotw -3
465 j 1b 470 j 1b
466 471
467 .end literal_prefix
468 472
469ENDPROC(_DoubleExceptionVector) 473ENDPROC(_DoubleExceptionVector)
470 474
471/* 475/*
476 * Fixup handler for TLB miss in double exception handler for window owerflow.
477 * We get here with windowbase set to the window that was being spilled and
478 * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
479 * (bit set) window.
480 *
481 * We do the following here:
482 * - go to the original window retaining a0 value;
483 * - set up exception stack to return back to appropriate a0 restore code
484 * (we'll need to rotate window back and there's no place to save this
485 * information, use different return address for that);
486 * - handle the exception;
487 * - go to the window that was being spilled;
488 * - set up window_overflow_restore_a0_fixup as a fixup routine;
489 * - reload a0;
490 * - restore the original window;
491 * - reset the default fixup routine;
492 * - return to user. By the time we get to this fixup handler all information
493 * about the conditions of the original double exception that happened in
494 * the window overflow handler is lost, so we just return to userspace to
495 * retry overflow from start.
496 *
497 * a0: value of depc, original value in depc
498 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
499 * a3: exctable, original value in excsave1
500 */
501
502ENTRY(window_overflow_restore_a0_fixup)
503
504 rsr a0, ps
505 extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
506 rsr a2, windowbase
507 sub a0, a2, a0
508 extui a0, a0, 0, 3
509 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
510 xsr a3, excsave1
511
512 _beqi a0, 1, .Lhandle_1
513 _beqi a0, 3, .Lhandle_3
514
515 .macro overflow_fixup_handle_exception_pane n
516
517 rsr a0, depc
518 rotw -\n
519
520 xsr a3, excsave1
521 wsr a2, depc
522 l32i a2, a3, EXC_TABLE_KSTK
523 s32i a0, a2, PT_AREG0
524
525 movi a0, .Lrestore_\n
526 s32i a0, a2, PT_DEPC
527 rsr a0, exccause
528 j _DoubleExceptionVector_handle_exception
529
530 .endm
531
532 overflow_fixup_handle_exception_pane 2
533.Lhandle_1:
534 overflow_fixup_handle_exception_pane 1
535.Lhandle_3:
536 overflow_fixup_handle_exception_pane 3
537
538 .macro overflow_fixup_restore_a0_pane n
539
540 rotw \n
541 /* Need to preserve a0 value here to be able to handle exception
542 * that may occur on a0 reload from stack. It may occur because
543 * TLB miss handler may not be atomic and pointer to page table
544 * may be lost before we get here. There are no free registers,
545 * so we need to use EXC_TABLE_DOUBLE_SAVE area.
546 */
547 xsr a3, excsave1
548 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
549 movi a2, window_overflow_restore_a0_fixup
550 s32i a2, a3, EXC_TABLE_FIXUP
551 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
552 xsr a3, excsave1
553 bbsi.l a0, 7, 1f
554 l32e a0, a9, -16
555 j 2f
5561:
557 l32e a0, a13, -16
5582:
559 rotw -\n
560
561 .endm
562
563.Lrestore_2:
564 overflow_fixup_restore_a0_pane 2
565
566.Lset_default_fixup:
567 xsr a3, excsave1
568 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
569 movi a2, 0
570 s32i a2, a3, EXC_TABLE_FIXUP
571 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
572 xsr a3, excsave1
573 rfe
574
575.Lrestore_1:
576 overflow_fixup_restore_a0_pane 1
577 j .Lset_default_fixup
578.Lrestore_3:
579 overflow_fixup_restore_a0_pane 3
580 j .Lset_default_fixup
581
582ENDPROC(window_overflow_restore_a0_fixup)
583
584 .end literal_prefix
585/*
472 * Debug interrupt vector 586 * Debug interrupt vector
473 * 587 *
474 * There is not much space here, so simply jump to another handler. 588 * There is not much space here, so simply jump to another handler.
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index ee32c0085dff..d16db6df86f8 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -269,13 +269,13 @@ SECTIONS
269 .UserExceptionVector.literal) 269 .UserExceptionVector.literal)
270 SECTION_VECTOR (_DoubleExceptionVector_literal, 270 SECTION_VECTOR (_DoubleExceptionVector_literal,
271 .DoubleExceptionVector.literal, 271 .DoubleExceptionVector.literal,
272 DOUBLEEXC_VECTOR_VADDR - 16, 272 DOUBLEEXC_VECTOR_VADDR - 40,
273 SIZEOF(.UserExceptionVector.text), 273 SIZEOF(.UserExceptionVector.text),
274 .UserExceptionVector.text) 274 .UserExceptionVector.text)
275 SECTION_VECTOR (_DoubleExceptionVector_text, 275 SECTION_VECTOR (_DoubleExceptionVector_text,
276 .DoubleExceptionVector.text, 276 .DoubleExceptionVector.text,
277 DOUBLEEXC_VECTOR_VADDR, 277 DOUBLEEXC_VECTOR_VADDR,
278 32, 278 40,
279 .DoubleExceptionVector.literal) 279 .DoubleExceptionVector.literal)
280 280
281 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; 281 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 4224256bb215..77ed20209ca5 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
191 return -EINVAL; 191 return -EINVAL;
192 } 192 }
193 193
194 if (it && start - it->start < bank_sz) { 194 if (it && start - it->start <= bank_sz) {
195 if (start == it->start) { 195 if (start == it->start) {
196 if (end - it->start < bank_sz) { 196 if (end - it->start < bank_sz) {
197 it->start = end; 197 it->start = end;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b9f4cc494ece..28d227c5ca77 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -872,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q)
872{ 872{
873 lockdep_assert_held(q->queue_lock); 873 lockdep_assert_held(q->queue_lock);
874 874
875 /*
876 * @q could be exiting and already have destroyed all blkgs as
877 * indicated by NULL root_blkg. If so, don't confuse policies.
878 */
879 if (!q->root_blkg)
880 return;
881
875 blk_throtl_drain(q); 882 blk_throtl_drain(q);
876} 883}
877 884
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 3f33d8672268..a185b86741e5 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
27EXPORT_SYMBOL(blk_queue_find_tag); 27EXPORT_SYMBOL(blk_queue_find_tag);
28 28
29/** 29/**
30 * __blk_free_tags - release a given set of tag maintenance info 30 * blk_free_tags - release a given set of tag maintenance info
31 * @bqt: the tag map to free 31 * @bqt: the tag map to free
32 * 32 *
33 * Tries to free the specified @bqt. Returns true if it was 33 * Drop the reference count on @bqt and frees it when the last reference
34 * actually freed and false if there are still references using it 34 * is dropped.
35 */ 35 */
36static int __blk_free_tags(struct blk_queue_tag *bqt) 36void blk_free_tags(struct blk_queue_tag *bqt)
37{ 37{
38 int retval; 38 if (atomic_dec_and_test(&bqt->refcnt)) {
39
40 retval = atomic_dec_and_test(&bqt->refcnt);
41 if (retval) {
42 BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) < 39 BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
43 bqt->max_depth); 40 bqt->max_depth);
44 41
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
50 47
51 kfree(bqt); 48 kfree(bqt);
52 } 49 }
53
54 return retval;
55} 50}
51EXPORT_SYMBOL(blk_free_tags);
56 52
57/** 53/**
58 * __blk_queue_free_tags - release tag maintenance info 54 * __blk_queue_free_tags - release tag maintenance info
@@ -69,28 +65,13 @@ void __blk_queue_free_tags(struct request_queue *q)
69 if (!bqt) 65 if (!bqt)
70 return; 66 return;
71 67
72 __blk_free_tags(bqt); 68 blk_free_tags(bqt);
73 69
74 q->queue_tags = NULL; 70 q->queue_tags = NULL;
75 queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); 71 queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
76} 72}
77 73
78/** 74/**
79 * blk_free_tags - release a given set of tag maintenance info
80 * @bqt: the tag map to free
81 *
82 * For externally managed @bqt frees the map. Callers of this
83 * function must guarantee to have released all the queues that
84 * might have been using this tag map.
85 */
86void blk_free_tags(struct blk_queue_tag *bqt)
87{
88 if (unlikely(!__blk_free_tags(bqt)))
89 BUG();
90}
91EXPORT_SYMBOL(blk_free_tags);
92
93/**
94 * blk_queue_free_tags - release tag maintenance info 75 * blk_queue_free_tags - release tag maintenance info
95 * @q: the request queue for the device 76 * @q: the request queue for the device
96 * 77 *
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index fbd5a67cb773..a0926a6094b2 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
690 case BLKROSET: 690 case BLKROSET:
691 case BLKDISCARD: 691 case BLKDISCARD:
692 case BLKSECDISCARD: 692 case BLKSECDISCARD:
693 case BLKZEROOUT:
693 /* 694 /*
694 * the ones below are implemented in blkdev_locked_ioctl, 695 * the ones below are implemented in blkdev_locked_ioctl,
695 * but we call blkdev_ioctl, which gets the lock for us 696 * but we call blkdev_ioctl, which gets the lock for us
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 966f893711b3..6a3ad8011585 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -21,6 +21,7 @@
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/net.h> 22#include <linux/net.h>
23#include <linux/rwsem.h> 23#include <linux/rwsem.h>
24#include <linux/security.h>
24 25
25struct alg_type_list { 26struct alg_type_list {
26 const struct af_alg_type *type; 27 const struct af_alg_type *type;
@@ -243,6 +244,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
243 244
244 sock_init_data(newsock, sk2); 245 sock_init_data(newsock, sk2);
245 sock_graft(sk2, newsock); 246 sock_graft(sk2, newsock);
247 security_sk_clone(sk, sk2);
246 248
247 err = type->accept(ask->private, sk2); 249 err = type->accept(ask->private, sk2);
248 if (err) { 250 if (err) {
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 071c1dfb93f3..350d52a8f781 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -68,7 +68,7 @@ MODULE_AUTHOR("Bruno Ducrot");
68MODULE_DESCRIPTION("ACPI Video Driver"); 68MODULE_DESCRIPTION("ACPI Video Driver");
69MODULE_LICENSE("GPL"); 69MODULE_LICENSE("GPL");
70 70
71static bool brightness_switch_enabled; 71static bool brightness_switch_enabled = 1;
72module_param(brightness_switch_enabled, bool, 0644); 72module_param(brightness_switch_enabled, bool, 0644);
73 73
74/* 74/*
@@ -581,6 +581,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
581 }, 581 },
582 { 582 {
583 .callback = video_set_use_native_backlight, 583 .callback = video_set_use_native_backlight,
584 .ident = "HP ProBook 4540s",
585 .matches = {
586 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
587 DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"),
588 },
589 },
590 {
591 .callback = video_set_use_native_backlight,
584 .ident = "HP ProBook 2013 models", 592 .ident = "HP ProBook 2013 models",
585 .matches = { 593 .matches = {
586 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 594 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dae5607e1115..4cd52a4541a9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
456 456
457 /* Promise */ 457 /* Promise */
458 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ 458 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
459 { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
459 460
460 /* Asmedia */ 461 /* Asmedia */
461 { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ 462 { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 18d97d5c7d90..677c0c1b03bd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
4787 * ata_qc_new - Request an available ATA command, for queueing 4787 * ata_qc_new - Request an available ATA command, for queueing
4788 * @ap: target port 4788 * @ap: target port
4789 * 4789 *
4790 * Some ATA host controllers may implement a queue depth which is less
4791 * than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
4792 * the hardware limitation.
4793 *
4790 * LOCKING: 4794 * LOCKING:
4791 * None. 4795 * None.
4792 */ 4796 */
@@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
4794static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 4798static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4795{ 4799{
4796 struct ata_queued_cmd *qc = NULL; 4800 struct ata_queued_cmd *qc = NULL;
4801 unsigned int max_queue = ap->host->n_tags;
4797 unsigned int i, tag; 4802 unsigned int i, tag;
4798 4803
4799 /* no command while frozen */ 4804 /* no command while frozen */
4800 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4805 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4801 return NULL; 4806 return NULL;
4802 4807
4803 for (i = 0; i < ATA_MAX_QUEUE; i++) { 4808 for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
4804 tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE; 4809 tag = tag < max_queue ? tag : 0;
4805 4810
4806 /* the last tag is reserved for internal command. */ 4811 /* the last tag is reserved for internal command. */
4807 if (tag == ATA_TAG_INTERNAL) 4812 if (tag == ATA_TAG_INTERNAL)
@@ -6088,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
6088{ 6093{
6089 spin_lock_init(&host->lock); 6094 spin_lock_init(&host->lock);
6090 mutex_init(&host->eh_mutex); 6095 mutex_init(&host->eh_mutex);
6096 host->n_tags = ATA_MAX_QUEUE - 1;
6091 host->dev = dev; 6097 host->dev = dev;
6092 host->ops = ops; 6098 host->ops = ops;
6093} 6099}
@@ -6169,6 +6175,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6169{ 6175{
6170 int i, rc; 6176 int i, rc;
6171 6177
6178 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6179
6172 /* host must have been started */ 6180 /* host must have been started */
6173 if (!(host->flags & ATA_HOST_STARTED)) { 6181 if (!(host->flags & ATA_HOST_STARTED)) {
6174 dev_err(host->dev, "BUG: trying to register unstarted host\n"); 6182 dev_err(host->dev, "BUG: trying to register unstarted host\n");
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 6760fc4e85b8..dad83df555c4 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1811 case ATA_DEV_ATA: 1811 case ATA_DEV_ATA:
1812 if (err & ATA_ICRC) 1812 if (err & ATA_ICRC)
1813 qc->err_mask |= AC_ERR_ATA_BUS; 1813 qc->err_mask |= AC_ERR_ATA_BUS;
1814 if (err & ATA_UNC) 1814 if (err & (ATA_UNC | ATA_AMNF))
1815 qc->err_mask |= AC_ERR_MEDIA; 1815 qc->err_mask |= AC_ERR_MEDIA;
1816 if (err & ATA_IDNF) 1816 if (err & ATA_IDNF)
1817 qc->err_mask |= AC_ERR_INVALID; 1817 qc->err_mask |= AC_ERR_INVALID;
@@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link)
2556 } 2556 }
2557 2557
2558 if (cmd->command != ATA_CMD_PACKET && 2558 if (cmd->command != ATA_CMD_PACKET &&
2559 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 2559 (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
2560 ATA_ABORTED))) 2560 ATA_IDNF | ATA_ABORTED)))
2561 ata_dev_err(qc->dev, "error: { %s%s%s%s}\n", 2561 ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
2562 res->feature & ATA_ICRC ? "ICRC " : "", 2562 res->feature & ATA_ICRC ? "ICRC " : "",
2563 res->feature & ATA_UNC ? "UNC " : "", 2563 res->feature & ATA_UNC ? "UNC " : "",
2564 res->feature & ATA_AMNF ? "AMNF " : "",
2564 res->feature & ATA_IDNF ? "IDNF " : "", 2565 res->feature & ATA_IDNF ? "IDNF " : "",
2565 res->feature & ATA_ABORTED ? "ABRT " : ""); 2566 res->feature & ATA_ABORTED ? "ABRT " : "");
2566#endif 2567#endif
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 6ad5c072ce34..4d37c5415fc7 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
915 struct ep93xx_pata_data *drv_data; 915 struct ep93xx_pata_data *drv_data;
916 struct ata_host *host; 916 struct ata_host *host;
917 struct ata_port *ap; 917 struct ata_port *ap;
918 unsigned int irq; 918 int irq;
919 struct resource *mem_res; 919 struct resource *mem_res;
920 void __iomem *ide_base; 920 void __iomem *ide_base;
921 int err; 921 int err;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9e9227e1762d..eee48c49f5de 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -89,8 +89,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
89 return dev->archdata.irqs[num]; 89 return dev->archdata.irqs[num];
90#else 90#else
91 struct resource *r; 91 struct resource *r;
92 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) 92 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
93 return of_irq_get(dev->dev.of_node, num); 93 int ret;
94
95 ret = of_irq_get(dev->dev.of_node, num);
96 if (ret >= 0 || ret == -EPROBE_DEFER)
97 return ret;
98 }
94 99
95 r = platform_get_resource(dev, IORESOURCE_IRQ, num); 100 r = platform_get_resource(dev, IORESOURCE_IRQ, num);
96 101
@@ -133,8 +138,13 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
133{ 138{
134 struct resource *r; 139 struct resource *r;
135 140
136 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) 141 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
137 return of_irq_get_byname(dev->dev.of_node, name); 142 int ret;
143
144 ret = of_irq_get_byname(dev->dev.of_node, name);
145 if (ret >= 0 || ret == -EPROBE_DEFER)
146 return ret;
147 }
138 148
139 r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); 149 r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
140 return r ? r->start : -ENXIO; 150 return r ? r->start : -ENXIO;
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1b35c45c92b7..3f2e16738080 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
544 struct task_struct *opa; 544 struct task_struct *opa;
545 545
546 kref_get(&connection->kref); 546 kref_get(&connection->kref);
547 /* We may just have force_sig()'ed this thread
548 * to get it out of some blocking network function.
549 * Clear signals; otherwise kthread_run(), which internally uses
550 * wait_on_completion_killable(), will mistake our pending signal
551 * for a new fatal signal and fail. */
552 flush_signals(current);
547 opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h"); 553 opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
548 if (IS_ERR(opa)) { 554 if (IS_ERR(opa)) {
549 drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n"); 555 drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 089e72cd37be..36e54be402df 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -622,11 +622,18 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
622 memset(&zram->stats, 0, sizeof(zram->stats)); 622 memset(&zram->stats, 0, sizeof(zram->stats));
623 623
624 zram->disksize = 0; 624 zram->disksize = 0;
625 if (reset_capacity) { 625 if (reset_capacity)
626 set_capacity(zram->disk, 0); 626 set_capacity(zram->disk, 0);
627 revalidate_disk(zram->disk); 627
628 }
629 up_write(&zram->init_lock); 628 up_write(&zram->init_lock);
629
630 /*
631 * Revalidate disk out of the init_lock to avoid lockdep splat.
632 * It's okay because disk's capacity is protected by init_lock
633 * so that revalidate_disk always sees up-to-date capacity.
634 */
635 if (reset_capacity)
636 revalidate_disk(zram->disk);
630} 637}
631 638
632static ssize_t disksize_store(struct device *dev, 639static ssize_t disksize_store(struct device *dev,
@@ -666,8 +673,15 @@ static ssize_t disksize_store(struct device *dev,
666 zram->comp = comp; 673 zram->comp = comp;
667 zram->disksize = disksize; 674 zram->disksize = disksize;
668 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); 675 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
669 revalidate_disk(zram->disk);
670 up_write(&zram->init_lock); 676 up_write(&zram->init_lock);
677
678 /*
679 * Revalidate disk out of the init_lock to avoid lockdep splat.
680 * It's okay because disk's capacity is protected by init_lock
681 * so that revalidate_disk always sees up-to-date capacity.
682 */
683 revalidate_disk(zram->disk);
684
671 return len; 685 return len;
672 686
673out_destroy_comp: 687out_destroy_comp:
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index f98380648cb3..f50dffc0374f 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -90,7 +90,6 @@ static const struct usb_device_id ath3k_table[] = {
90 { USB_DEVICE(0x0b05, 0x17d0) }, 90 { USB_DEVICE(0x0b05, 0x17d0) },
91 { USB_DEVICE(0x0CF3, 0x0036) }, 91 { USB_DEVICE(0x0CF3, 0x0036) },
92 { USB_DEVICE(0x0CF3, 0x3004) }, 92 { USB_DEVICE(0x0CF3, 0x3004) },
93 { USB_DEVICE(0x0CF3, 0x3005) },
94 { USB_DEVICE(0x0CF3, 0x3008) }, 93 { USB_DEVICE(0x0CF3, 0x3008) },
95 { USB_DEVICE(0x0CF3, 0x311D) }, 94 { USB_DEVICE(0x0CF3, 0x311D) },
96 { USB_DEVICE(0x0CF3, 0x311E) }, 95 { USB_DEVICE(0x0CF3, 0x311E) },
@@ -140,7 +139,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
140 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, 139 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
141 { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, 140 { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
142 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, 141 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
143 { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
144 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, 142 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
145 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, 143 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
146 { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 }, 144 { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a1c80b0c7663..6250fc2fb93a 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -162,7 +162,6 @@ static const struct usb_device_id blacklist_table[] = {
162 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, 162 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
163 { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, 163 { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
164 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, 164 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
165 { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
166 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, 165 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
167 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, 166 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
168 { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, 167 { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 04680ead9275..fede8ca7147c 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
406 H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) { 406 H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
407 BT_ERR("Non-link packet received in non-active state"); 407 BT_ERR("Non-link packet received in non-active state");
408 h5_reset_rx(h5); 408 h5_reset_rx(h5);
409 return 0;
409 } 410 }
410 411
411 h5->rx_func = h5_rx_payload; 412 h5->rx_func = h5_rx_payload;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 334601cc81cf..c4419ea1ab07 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -55,16 +55,41 @@ static DEFINE_MUTEX(rng_mutex);
55static int data_avail; 55static int data_avail;
56static u8 *rng_buffer; 56static u8 *rng_buffer;
57 57
58static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
59 int wait);
60
58static size_t rng_buffer_size(void) 61static size_t rng_buffer_size(void)
59{ 62{
60 return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES; 63 return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
61} 64}
62 65
66static void add_early_randomness(struct hwrng *rng)
67{
68 unsigned char bytes[16];
69 int bytes_read;
70
71 /*
72 * Currently only virtio-rng cannot return data during device
73 * probe, and that's handled in virtio-rng.c itself. If there
74 * are more such devices, this call to rng_get_data can be
75 * made conditional here instead of doing it per-device.
76 */
77 bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
78 if (bytes_read > 0)
79 add_device_randomness(bytes, bytes_read);
80}
81
63static inline int hwrng_init(struct hwrng *rng) 82static inline int hwrng_init(struct hwrng *rng)
64{ 83{
65 if (!rng->init) 84 if (rng->init) {
66 return 0; 85 int ret;
67 return rng->init(rng); 86
87 ret = rng->init(rng);
88 if (ret)
89 return ret;
90 }
91 add_early_randomness(rng);
92 return 0;
68} 93}
69 94
70static inline void hwrng_cleanup(struct hwrng *rng) 95static inline void hwrng_cleanup(struct hwrng *rng)
@@ -304,8 +329,6 @@ int hwrng_register(struct hwrng *rng)
304{ 329{
305 int err = -EINVAL; 330 int err = -EINVAL;
306 struct hwrng *old_rng, *tmp; 331 struct hwrng *old_rng, *tmp;
307 unsigned char bytes[16];
308 int bytes_read;
309 332
310 if (rng->name == NULL || 333 if (rng->name == NULL ||
311 (rng->data_read == NULL && rng->read == NULL)) 334 (rng->data_read == NULL && rng->read == NULL))
@@ -347,9 +370,17 @@ int hwrng_register(struct hwrng *rng)
347 INIT_LIST_HEAD(&rng->list); 370 INIT_LIST_HEAD(&rng->list);
348 list_add_tail(&rng->list, &rng_list); 371 list_add_tail(&rng->list, &rng_list);
349 372
350 bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); 373 if (old_rng && !rng->init) {
351 if (bytes_read > 0) 374 /*
352 add_device_randomness(bytes, bytes_read); 375 * Use a new device's input to add some randomness to
376 * the system. If this rng device isn't going to be
377 * used right away, its init function hasn't been
378 * called yet; so only use the randomness from devices
379 * that don't need an init callback.
380 */
381 add_early_randomness(rng);
382 }
383
353out_unlock: 384out_unlock:
354 mutex_unlock(&rng_mutex); 385 mutex_unlock(&rng_mutex);
355out: 386out:
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index f3e71501de54..e9b15bc18b4d 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -38,6 +38,8 @@ struct virtrng_info {
38 int index; 38 int index;
39}; 39};
40 40
41static bool probe_done;
42
41static void random_recv_done(struct virtqueue *vq) 43static void random_recv_done(struct virtqueue *vq)
42{ 44{
43 struct virtrng_info *vi = vq->vdev->priv; 45 struct virtrng_info *vi = vq->vdev->priv;
@@ -67,6 +69,13 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
67 int ret; 69 int ret;
68 struct virtrng_info *vi = (struct virtrng_info *)rng->priv; 70 struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
69 71
72 /*
73 * Don't ask host for data till we're setup. This call can
74 * happen during hwrng_register(), after commit d9e7972619.
75 */
76 if (unlikely(!probe_done))
77 return 0;
78
70 if (!vi->busy) { 79 if (!vi->busy) {
71 vi->busy = true; 80 vi->busy = true;
72 init_completion(&vi->have_data); 81 init_completion(&vi->have_data);
@@ -137,6 +146,7 @@ static int probe_common(struct virtio_device *vdev)
137 return err; 146 return err;
138 } 147 }
139 148
149 probe_done = true;
140 return 0; 150 return 0;
141} 151}
142 152
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0a7ac0a7b252..71529e196b84 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -641,7 +641,7 @@ retry:
641 } while (unlikely(entropy_count < pool_size-2 && pnfrac)); 641 } while (unlikely(entropy_count < pool_size-2 && pnfrac));
642 } 642 }
643 643
644 if (entropy_count < 0) { 644 if (unlikely(entropy_count < 0)) {
645 pr_warn("random: negative entropy/overflow: pool %s count %d\n", 645 pr_warn("random: negative entropy/overflow: pool %s count %d\n",
646 r->name, entropy_count); 646 r->name, entropy_count);
647 WARN_ON(1); 647 WARN_ON(1);
@@ -981,7 +981,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
981 int reserved) 981 int reserved)
982{ 982{
983 int entropy_count, orig; 983 int entropy_count, orig;
984 size_t ibytes; 984 size_t ibytes, nfrac;
985 985
986 BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); 986 BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
987 987
@@ -999,7 +999,17 @@ retry:
999 } 999 }
1000 if (ibytes < min) 1000 if (ibytes < min)
1001 ibytes = 0; 1001 ibytes = 0;
1002 if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0) 1002
1003 if (unlikely(entropy_count < 0)) {
1004 pr_warn("random: negative entropy count: pool %s count %d\n",
1005 r->name, entropy_count);
1006 WARN_ON(1);
1007 entropy_count = 0;
1008 }
1009 nfrac = ibytes << (ENTROPY_SHIFT + 3);
1010 if ((size_t) entropy_count > nfrac)
1011 entropy_count -= nfrac;
1012 else
1003 entropy_count = 0; 1013 entropy_count = 0;
1004 1014
1005 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) 1015 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
@@ -1376,6 +1386,7 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1376 "with %d bits of entropy available\n", 1386 "with %d bits of entropy available\n",
1377 current->comm, nonblocking_pool.entropy_total); 1387 current->comm, nonblocking_pool.entropy_total);
1378 1388
1389 nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
1379 ret = extract_entropy_user(&nonblocking_pool, buf, nbytes); 1390 ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
1380 1391
1381 trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool), 1392 trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index e1581335937d..cb8e6f14e880 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -16,7 +16,7 @@
16#include <linux/clkdev.h> 16#include <linux/clkdev.h>
17#include <linux/clk/ti.h> 17#include <linux/clk/ti.h>
18 18
19#define DRA7_DPLL_ABE_DEFFREQ 361267200 19#define DRA7_DPLL_ABE_DEFFREQ 180633600
20#define DRA7_DPLL_GMAC_DEFFREQ 1000000000 20#define DRA7_DPLL_GMAC_DEFFREQ 1000000000
21 21
22 22
@@ -322,6 +322,11 @@ int __init dra7xx_dt_clk_init(void)
322 if (rc) 322 if (rc)
323 pr_err("%s: failed to configure ABE DPLL!\n", __func__); 323 pr_err("%s: failed to configure ABE DPLL!\n", __func__);
324 324
325 dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
326 rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2);
327 if (rc)
328 pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__);
329
325 dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck"); 330 dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
326 rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ); 331 rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
327 if (rc) 332 if (rc)
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index ebac67115009..7364a538e056 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -104,6 +104,7 @@ config ARM_IMX6Q_CPUFREQ
104 tristate "Freescale i.MX6 cpufreq support" 104 tristate "Freescale i.MX6 cpufreq support"
105 depends on ARCH_MXC 105 depends on ARCH_MXC
106 depends on REGULATOR_ANATOP 106 depends on REGULATOR_ANATOP
107 select PM_OPP
107 help 108 help
108 This adds cpufreq driver support for Freescale i.MX6 series SoCs. 109 This adds cpufreq driver support for Freescale i.MX6 series SoCs.
109 110
@@ -118,7 +119,7 @@ config ARM_INTEGRATOR
118 If in doubt, say Y. 119 If in doubt, say Y.
119 120
120config ARM_KIRKWOOD_CPUFREQ 121config ARM_KIRKWOOD_CPUFREQ
121 def_bool MACH_KIRKWOOD 122 def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD
122 help 123 help
123 This adds the CPUFreq driver for Marvell Kirkwood 124 This adds the CPUFreq driver for Marvell Kirkwood
124 SoCs. 125 SoCs.
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index ee1ae303a07c..86beda9f950b 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -152,11 +152,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
152 goto out_put_reg; 152 goto out_put_reg;
153 } 153 }
154 154
155 ret = of_init_opp_table(cpu_dev); 155 /* OPPs might be populated at runtime, don't check for error here */
156 if (ret) { 156 of_init_opp_table(cpu_dev);
157 pr_err("failed to init OPP table: %d\n", ret);
158 goto out_put_clk;
159 }
160 157
161 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); 158 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
162 if (ret) { 159 if (ret) {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 62259d27f03e..6f024852c6fb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1153,10 +1153,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1153 * the creation of a brand new one. So we need to perform this update 1153 * the creation of a brand new one. So we need to perform this update
1154 * by invoking update_policy_cpu(). 1154 * by invoking update_policy_cpu().
1155 */ 1155 */
1156 if (recover_policy && cpu != policy->cpu) 1156 if (recover_policy && cpu != policy->cpu) {
1157 update_policy_cpu(policy, cpu); 1157 update_policy_cpu(policy, cpu);
1158 else 1158 WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
1159 } else {
1159 policy->cpu = cpu; 1160 policy->cpu = cpu;
1161 }
1160 1162
1161 cpumask_copy(policy->cpus, cpumask_of(cpu)); 1163 cpumask_copy(policy->cpus, cpumask_of(cpu));
1162 1164
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 546376719d8f..b5befc211172 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -349,7 +349,7 @@ static int __init sa1110_clk_init(void)
349 name = "K4S641632D"; 349 name = "K4S641632D";
350 if (machine_is_h3100()) 350 if (machine_is_h3100())
351 name = "KM416S4030CT"; 351 name = "KM416S4030CT";
352 if (machine_is_jornada720()) 352 if (machine_is_jornada720() || machine_is_h3600())
353 name = "K4S281632B-1H"; 353 name = "K4S281632B-1H";
354 if (machine_is_nanoengine()) 354 if (machine_is_nanoengine())
355 name = "MT48LC8M16A2TG-75"; 355 name = "MT48LC8M16A2TG-75";
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 4199849e3758..145974f9662b 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,4 +1,5 @@
1menu "IEEE 1394 (FireWire) support" 1menu "IEEE 1394 (FireWire) support"
2 depends on HAS_DMA
2 depends on PCI || COMPILE_TEST 3 depends on PCI || COMPILE_TEST
3 # firewire-core does not depend on PCI but is 4 # firewire-core does not depend on PCI but is
4 # not useful without PCI controller driver 5 # not useful without PCI controller driver
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 57985410f12f..a66a3217f1d9 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -336,10 +336,10 @@ static const struct {
336 QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE}, 336 QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
337 337
338 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0, 338 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
339 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, 339 QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
340 340
341 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID, 341 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
342 0}, 342 QUIRK_NO_MSI},
343 343
344 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, 344 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
345 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, 345 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index eff1a2f22f09..dc79346689e6 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -346,6 +346,7 @@ static __initdata struct {
346 346
347struct param_info { 347struct param_info {
348 int verbose; 348 int verbose;
349 int found;
349 void *params; 350 void *params;
350}; 351};
351 352
@@ -362,16 +363,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
362 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) 363 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
363 return 0; 364 return 0;
364 365
365 pr_info("Getting parameters from FDT:\n");
366
367 for (i = 0; i < ARRAY_SIZE(dt_params); i++) { 366 for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
368 prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len); 367 prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len);
369 if (!prop) { 368 if (!prop)
370 pr_err("Can't find %s in device tree!\n",
371 dt_params[i].name);
372 return 0; 369 return 0;
373 }
374 dest = info->params + dt_params[i].offset; 370 dest = info->params + dt_params[i].offset;
371 info->found++;
375 372
376 val = of_read_number(prop, len / sizeof(u32)); 373 val = of_read_number(prop, len / sizeof(u32));
377 374
@@ -390,10 +387,21 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
390int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose) 387int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose)
391{ 388{
392 struct param_info info; 389 struct param_info info;
390 int ret;
391
392 pr_info("Getting EFI parameters from FDT:\n");
393 393
394 info.verbose = verbose; 394 info.verbose = verbose;
395 info.found = 0;
395 info.params = params; 396 info.params = params;
396 397
397 return of_scan_flat_dt(fdt_find_uefi_params, &info); 398 ret = of_scan_flat_dt(fdt_find_uefi_params, &info);
399 if (!info.found)
400 pr_info("UEFI not found.\n");
401 else if (!ret)
402 pr_err("Can't find '%s' in device tree!\n",
403 dt_params[info.found].name);
404
405 return ret;
398} 406}
399#endif /* CONFIG_EFI_PARAMS_FROM_FDT */ 407#endif /* CONFIG_EFI_PARAMS_FROM_FDT */
diff --git a/drivers/firmware/efi/fdt.c b/drivers/firmware/efi/fdt.c
index 82d774161cc9..507a3df46a5d 100644
--- a/drivers/firmware/efi/fdt.c
+++ b/drivers/firmware/efi/fdt.c
@@ -23,16 +23,6 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
23 u32 fdt_val32; 23 u32 fdt_val32;
24 u64 fdt_val64; 24 u64 fdt_val64;
25 25
26 /*
27 * Copy definition of linux_banner here. Since this code is
28 * built as part of the decompressor for ARM v7, pulling
29 * in version.c where linux_banner is defined for the
30 * kernel brings other kernel dependencies with it.
31 */
32 const char linux_banner[] =
33 "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
34 LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
35
36 /* Do some checks on provided FDT, if it exists*/ 26 /* Do some checks on provided FDT, if it exists*/
37 if (orig_fdt) { 27 if (orig_fdt) {
38 if (fdt_check_header(orig_fdt)) { 28 if (fdt_check_header(orig_fdt)) {
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index fe7c0e211f9a..57adbc90fdad 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -900,8 +900,6 @@ static int mcp23s08_probe(struct spi_device *spi)
900 if (spi_present_mask & (1 << addr)) 900 if (spi_present_mask & (1 << addr))
901 chips++; 901 chips++;
902 } 902 }
903 if (!chips)
904 return -ENODEV;
905 } else { 903 } else {
906 type = spi_get_device_id(spi)->driver_data; 904 type = spi_get_device_id(spi)->driver_data;
907 pdata = dev_get_platdata(&spi->dev); 905 pdata = dev_get_platdata(&spi->dev);
@@ -940,10 +938,6 @@ static int mcp23s08_probe(struct spi_device *spi)
940 if (!(spi_present_mask & (1 << addr))) 938 if (!(spi_present_mask & (1 << addr)))
941 continue; 939 continue;
942 chips--; 940 chips--;
943 if (chips < 0) {
944 dev_err(&spi->dev, "FATAL: invalid negative chip id\n");
945 goto fail;
946 }
947 data->mcp[addr] = &data->chip[chips]; 941 data->mcp[addr] = &data->chip[chips];
948 status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi, 942 status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
949 0x40 | (addr << 1), type, base, 943 0x40 | (addr << 1), type, base,
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 0c9f803fc1ac..b6ae89ea8811 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
284 284
285static struct irq_domain_ops gpio_rcar_irq_domain_ops = { 285static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
286 .map = gpio_rcar_irq_domain_map, 286 .map = gpio_rcar_irq_domain_map,
287 .xlate = irq_domain_xlate_twocell,
287}; 288};
288 289
289struct gpio_rcar_info { 290struct gpio_rcar_info {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f36126383d26..d893e4da5dce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1616,22 +1616,6 @@ out:
1616 return ret; 1616 return ret;
1617} 1617}
1618 1618
1619void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1620{
1621 struct i915_vma *vma;
1622
1623 /*
1624 * Only the global gtt is relevant for gtt memory mappings, so restrict
1625 * list traversal to objects bound into the global address space. Note
1626 * that the active list should be empty, but better safe than sorry.
1627 */
1628 WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1629 list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1630 i915_gem_release_mmap(vma->obj);
1631 list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1632 i915_gem_release_mmap(vma->obj);
1633}
1634
1635/** 1619/**
1636 * i915_gem_release_mmap - remove physical page mappings 1620 * i915_gem_release_mmap - remove physical page mappings
1637 * @obj: obj in question 1621 * @obj: obj in question
@@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1657 obj->fault_mappable = false; 1641 obj->fault_mappable = false;
1658} 1642}
1659 1643
1644void
1645i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1646{
1647 struct drm_i915_gem_object *obj;
1648
1649 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1650 i915_gem_release_mmap(obj);
1651}
1652
1660uint32_t 1653uint32_t
1661i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) 1654i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1662{ 1655{
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 3521f998a178..34894b573064 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -31,7 +31,7 @@
31struct i915_render_state { 31struct i915_render_state {
32 struct drm_i915_gem_object *obj; 32 struct drm_i915_gem_object *obj;
33 unsigned long ggtt_offset; 33 unsigned long ggtt_offset;
34 void *batch; 34 u32 *batch;
35 u32 size; 35 u32 size;
36 u32 len; 36 u32 len;
37}; 37};
@@ -80,7 +80,7 @@ free:
80 80
81static void render_state_free(struct i915_render_state *so) 81static void render_state_free(struct i915_render_state *so)
82{ 82{
83 kunmap(so->batch); 83 kunmap(kmap_to_page(so->batch));
84 i915_gem_object_ggtt_unpin(so->obj); 84 i915_gem_object_ggtt_unpin(so->obj);
85 drm_gem_object_unreference(&so->obj->base); 85 drm_gem_object_unreference(&so->obj->base);
86 kfree(so); 86 kfree(so);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 267f069765ad..c05c84f3f091 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
2845{ 2845{
2846 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2846 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2847 struct intel_engine_cs *signaller; 2847 struct intel_engine_cs *signaller;
2848 u32 seqno, ctl; 2848 u32 seqno;
2849 2849
2850 ring->hangcheck.deadlock++; 2850 ring->hangcheck.deadlock++;
2851 2851
@@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
2857 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) 2857 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2858 return -1; 2858 return -1;
2859 2859
2860 /* cursory check for an unkickable deadlock */
2861 ctl = I915_READ_CTL(signaller);
2862 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2863 return -1;
2864
2865 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) 2860 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2866 return 1; 2861 return 1;
2867 2862
2868 if (signaller->hangcheck.deadlock) 2863 /* cursory check for an unkickable deadlock */
2864 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2865 semaphore_passed(signaller) < 0)
2869 return -1; 2866 return -1;
2870 2867
2871 return 0; 2868 return 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e27e7804c0b9..f0be855ddf45 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11673,6 +11673,9 @@ static struct intel_quirk intel_quirks[] = {
11673 11673
11674 /* Toshiba CB35 Chromebook (Celeron 2955U) */ 11674 /* Toshiba CB35 Chromebook (Celeron 2955U) */
11675 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, 11675 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
11676
11677 /* HP Chromebook 14 (Celeron 2955U) */
11678 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
11676}; 11679};
11677 11680
11678static void intel_init_quirks(struct drm_device *dev) 11681static void intel_init_quirks(struct drm_device *dev)
@@ -11911,6 +11914,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
11911 * ... */ 11914 * ... */
11912 plane = crtc->plane; 11915 plane = crtc->plane;
11913 crtc->plane = !plane; 11916 crtc->plane = !plane;
11917 crtc->primary_enabled = true;
11914 dev_priv->display.crtc_disable(&crtc->base); 11918 dev_priv->display.crtc_disable(&crtc->base);
11915 crtc->plane = plane; 11919 crtc->plane = plane;
11916 11920
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 075170d1844f..8a1a4fbc06ac 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -906,8 +906,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
906 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 906 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
907 bpp); 907 bpp);
908 908
909 for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { 909 for (clock = min_clock; clock <= max_clock; clock++) {
910 for (clock = min_clock; clock <= max_clock; clock++) { 910 for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
911 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); 911 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
912 link_avail = intel_dp_max_data_rate(link_clock, 912 link_avail = intel_dp_max_data_rate(link_clock,
913 lane_count); 913 lane_count);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 23126023aeba..5e5a72fca5fb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -111,6 +111,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
111 111
112 pipe_config->adjusted_mode.flags |= flags; 112 pipe_config->adjusted_mode.flags |= flags;
113 113
114 /* gen2/3 store dither state in pfit control, needs to match */
115 if (INTEL_INFO(dev)->gen < 4) {
116 tmp = I915_READ(PFIT_CONTROL);
117
118 pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
119 }
120
114 dotclock = pipe_config->port_clock; 121 dotclock = pipe_config->port_clock;
115 122
116 if (HAS_PCH_SPLIT(dev_priv->dev)) 123 if (HAS_PCH_SPLIT(dev_priv->dev))
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 628cd8938274..12b02fe1d0ae 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -361,16 +361,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
361 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | 361 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
362 PFIT_FILTER_FUZZY); 362 PFIT_FILTER_FUZZY);
363 363
364 /* Make sure pre-965 set dither correctly for 18bpp panels. */
365 if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
366 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
367
368out: 364out:
369 if ((pfit_control & PFIT_ENABLE) == 0) { 365 if ((pfit_control & PFIT_ENABLE) == 0) {
370 pfit_control = 0; 366 pfit_control = 0;
371 pfit_pgm_ratios = 0; 367 pfit_pgm_ratios = 0;
372 } 368 }
373 369
370 /* Make sure pre-965 set dither correctly for 18bpp panels. */
371 if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
372 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
373
374 pipe_config->gmch_pfit.control = pfit_control; 374 pipe_config->gmch_pfit.control = pfit_control;
375 pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios; 375 pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
376 pipe_config->gmch_pfit.lvds_border_bits = border; 376 pipe_config->gmch_pfit.lvds_border_bits = border;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index cfde9eb44ad0..6212537b90c5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -192,11 +192,11 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
192 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown, 192 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
193 NOUVEAU_THERM_THRS_SHUTDOWN); 193 NOUVEAU_THERM_THRS_SHUTDOWN);
194 194
195 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
196
195 /* schedule the next poll in one second */ 197 /* schedule the next poll in one second */
196 if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head)) 198 if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
197 ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); 199 ptimer->alarm(ptimer, 1000000000ULL, alarm);
198
199 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
200} 200}
201 201
202void 202void
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 34d6a85e9023..0bf1e20c6e44 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
33 33
34 pending = xchg(&qdev->ram_header->int_pending, 0); 34 pending = xchg(&qdev->ram_header->int_pending, 0);
35 35
36 if (!pending)
37 return IRQ_NONE;
38
36 atomic_inc(&qdev->irq_received); 39 atomic_inc(&qdev->irq_received);
37 40
38 if (pending & QXL_INTERRUPT_DISPLAY) { 41 if (pending & QXL_INTERRUPT_DISPLAY) {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a03c73411a56..30d242b25078 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1414,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1414 tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN; 1414 tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
1415 WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); 1415 WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
1416 1416
1417 /* set pageflip to happen anywhere in vblank interval */ 1417 /* set pageflip to happen only at start of vblank interval (front porch) */
1418 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); 1418 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
1419 1419
1420 if (!atomic && fb && fb != crtc->primary->fb) { 1420 if (!atomic && fb && fb != crtc->primary->fb) {
1421 radeon_fb = to_radeon_framebuffer(fb); 1421 radeon_fb = to_radeon_framebuffer(fb);
@@ -1614,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1614 tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN; 1614 tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
1615 WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); 1615 WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
1616 1616
1617 /* set pageflip to happen anywhere in vblank interval */ 1617 /* set pageflip to happen only at start of vblank interval (front porch) */
1618 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); 1618 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
1619 1619
1620 if (!atomic && fb && fb != crtc->primary->fb) { 1620 if (!atomic && fb && fb != crtc->primary->fb) {
1621 radeon_fb = to_radeon_framebuffer(fb); 1621 radeon_fb = to_radeon_framebuffer(fb);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2b2908440644..7d68203a3737 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
183 struct backlight_properties props; 183 struct backlight_properties props;
184 struct radeon_backlight_privdata *pdata; 184 struct radeon_backlight_privdata *pdata;
185 struct radeon_encoder_atom_dig *dig; 185 struct radeon_encoder_atom_dig *dig;
186 u8 backlight_level;
187 char bl_name[16]; 186 char bl_name[16];
188 187
189 /* Mac laptops with multiple GPUs use the gmux driver for backlight 188 /* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
222 221
223 pdata->encoder = radeon_encoder; 222 pdata->encoder = radeon_encoder;
224 223
225 backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
226
227 dig = radeon_encoder->enc_priv; 224 dig = radeon_encoder->enc_priv;
228 dig->bl_dev = bd; 225 dig->bl_dev = bd;
229 226
230 bd->props.brightness = radeon_atom_backlight_get_brightness(bd); 227 bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
228 /* Set a reasonable default here if the level is 0 otherwise
229 * fbdev will attempt to turn the backlight on after console
230 * unblanking and it will try and restore 0 which turns the backlight
231 * off again.
232 */
233 if (bd->props.brightness == 0)
234 bd->props.brightness = RADEON_MAX_BL_LEVEL;
231 bd->props.power = FB_BLANK_UNBLANK; 235 bd->props.power = FB_BLANK_UNBLANK;
232 backlight_update_status(bd); 236 backlight_update_status(bd);
233 237
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0b2471107137..c0ea66192fe0 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
2291 gb_tile_moden = 0; 2291 gb_tile_moden = 0;
2292 break; 2292 break;
2293 } 2293 }
2294 rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
2294 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2295 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2295 } 2296 }
2296 } else if (num_pipe_configs == 8) { 2297 } else if (num_pipe_configs == 8) {
@@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
7376 tmp = RREG32(IH_RB_CNTL); 7377 tmp = RREG32(IH_RB_CNTL);
7377 tmp |= IH_WPTR_OVERFLOW_CLEAR; 7378 tmp |= IH_WPTR_OVERFLOW_CLEAR;
7378 WREG32(IH_RB_CNTL, tmp); 7379 WREG32(IH_RB_CNTL, tmp);
7380 wptr &= ~RB_OVERFLOW;
7379 } 7381 }
7380 return (wptr & rdev->ih.ptr_mask); 7382 return (wptr & rdev->ih.ptr_mask);
7381} 7383}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f7ece0ff431b..15e4f28015e1 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2642,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
2642 for (i = 0; i < rdev->num_crtc; i++) { 2642 for (i = 0; i < rdev->num_crtc; i++) {
2643 if (save->crtc_enabled[i]) { 2643 if (save->crtc_enabled[i]) {
2644 tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); 2644 tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
2645 if ((tmp & 0x3) != 0) { 2645 if ((tmp & 0x7) != 3) {
2646 tmp &= ~0x3; 2646 tmp &= ~0x7;
2647 tmp |= 0x3;
2647 WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); 2648 WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
2648 } 2649 }
2649 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); 2650 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -4755,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
4755 tmp = RREG32(IH_RB_CNTL); 4756 tmp = RREG32(IH_RB_CNTL);
4756 tmp |= IH_WPTR_OVERFLOW_CLEAR; 4757 tmp |= IH_WPTR_OVERFLOW_CLEAR;
4757 WREG32(IH_RB_CNTL, tmp); 4758 WREG32(IH_RB_CNTL, tmp);
4759 wptr &= ~RB_OVERFLOW;
4758 } 4760 }
4759 return (wptr & rdev->ih.ptr_mask); 4761 return (wptr & rdev->ih.ptr_mask);
4760} 4762}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 333d143fca2c..23bff590fb6e 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -239,7 +239,6 @@
239# define EVERGREEN_CRTC_V_BLANK (1 << 0) 239# define EVERGREEN_CRTC_V_BLANK (1 << 0)
240#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 240#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
241#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0 241#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
242#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
243#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 242#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
244#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4 243#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
245#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 244#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c66952d4b00c..3c69f58e46ef 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3795 tmp = RREG32(IH_RB_CNTL); 3795 tmp = RREG32(IH_RB_CNTL);
3796 tmp |= IH_WPTR_OVERFLOW_CLEAR; 3796 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3797 WREG32(IH_RB_CNTL, tmp); 3797 WREG32(IH_RB_CNTL, tmp);
3798 wptr &= ~RB_OVERFLOW;
3798 } 3799 }
3799 return (wptr & rdev->ih.ptr_mask); 3800 return (wptr & rdev->ih.ptr_mask);
3800} 3801}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 29d9cc04c04e..60c47f829122 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -449,6 +449,7 @@ struct radeon_bo_va {
449 449
450 /* protected by vm mutex */ 450 /* protected by vm mutex */
451 struct list_head vm_list; 451 struct list_head vm_list;
452 struct list_head vm_status;
452 453
453 /* constant after initialization */ 454 /* constant after initialization */
454 struct radeon_vm *vm; 455 struct radeon_vm *vm;
@@ -684,10 +685,9 @@ struct radeon_flip_work {
684 struct work_struct unpin_work; 685 struct work_struct unpin_work;
685 struct radeon_device *rdev; 686 struct radeon_device *rdev;
686 int crtc_id; 687 int crtc_id;
687 struct drm_framebuffer *fb; 688 uint64_t base;
688 struct drm_pending_vblank_event *event; 689 struct drm_pending_vblank_event *event;
689 struct radeon_bo *old_rbo; 690 struct radeon_bo *old_rbo;
690 struct radeon_bo *new_rbo;
691 struct radeon_fence *fence; 691 struct radeon_fence *fence;
692}; 692};
693 693
@@ -868,6 +868,9 @@ struct radeon_vm {
868 struct list_head va; 868 struct list_head va;
869 unsigned id; 869 unsigned id;
870 870
871 /* BOs freed, but not yet updated in the PT */
872 struct list_head freed;
873
871 /* contains the page directory */ 874 /* contains the page directory */
872 struct radeon_bo *page_directory; 875 struct radeon_bo *page_directory;
873 uint64_t pd_gpu_addr; 876 uint64_t pd_gpu_addr;
@@ -876,6 +879,8 @@ struct radeon_vm {
876 /* array of page tables, one for each page directory entry */ 879 /* array of page tables, one for each page directory entry */
877 struct radeon_vm_pt *page_tables; 880 struct radeon_vm_pt *page_tables;
878 881
882 struct radeon_bo_va *ib_bo_va;
883
879 struct mutex mutex; 884 struct mutex mutex;
880 /* last fence for cs using this vm */ 885 /* last fence for cs using this vm */
881 struct radeon_fence *fence; 886 struct radeon_fence *fence;
@@ -2833,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
2833uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); 2838uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
2834int radeon_vm_update_page_directory(struct radeon_device *rdev, 2839int radeon_vm_update_page_directory(struct radeon_device *rdev,
2835 struct radeon_vm *vm); 2840 struct radeon_vm *vm);
2841int radeon_vm_clear_freed(struct radeon_device *rdev,
2842 struct radeon_vm *vm);
2836int radeon_vm_bo_update(struct radeon_device *rdev, 2843int radeon_vm_bo_update(struct radeon_device *rdev,
2837 struct radeon_vm *vm, 2844 struct radeon_bo_va *bo_va,
2838 struct radeon_bo *bo,
2839 struct ttm_mem_reg *mem); 2845 struct ttm_mem_reg *mem);
2840void radeon_vm_bo_invalidate(struct radeon_device *rdev, 2846void radeon_vm_bo_invalidate(struct radeon_device *rdev,
2841 struct radeon_bo *bo); 2847 struct radeon_bo *bo);
@@ -2848,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
2848 struct radeon_bo_va *bo_va, 2854 struct radeon_bo_va *bo_va,
2849 uint64_t offset, 2855 uint64_t offset,
2850 uint32_t flags); 2856 uint32_t flags);
2851int radeon_vm_bo_rmv(struct radeon_device *rdev, 2857void radeon_vm_bo_rmv(struct radeon_device *rdev,
2852 struct radeon_bo_va *bo_va); 2858 struct radeon_bo_va *bo_va);
2853 2859
2854/* audio */ 2860/* audio */
2855void r600_audio_update_hdmi(struct work_struct *work); 2861void r600_audio_update_hdmi(struct work_struct *work);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 71a143461478..ae763f60c8a0 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
461 struct radeon_vm *vm) 461 struct radeon_vm *vm)
462{ 462{
463 struct radeon_device *rdev = p->rdev; 463 struct radeon_device *rdev = p->rdev;
464 struct radeon_bo_va *bo_va;
464 int i, r; 465 int i, r;
465 466
466 r = radeon_vm_update_page_directory(rdev, vm); 467 r = radeon_vm_update_page_directory(rdev, vm);
467 if (r) 468 if (r)
468 return r; 469 return r;
469 470
470 r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, 471 r = radeon_vm_clear_freed(rdev, vm);
472 if (r)
473 return r;
474
475 if (vm->ib_bo_va == NULL) {
476 DRM_ERROR("Tmp BO not in VM!\n");
477 return -EINVAL;
478 }
479
480 r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
471 &rdev->ring_tmp_bo.bo->tbo.mem); 481 &rdev->ring_tmp_bo.bo->tbo.mem);
472 if (r) 482 if (r)
473 return r; 483 return r;
@@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
480 continue; 490 continue;
481 491
482 bo = p->relocs[i].robj; 492 bo = p->relocs[i].robj;
483 r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem); 493 bo_va = radeon_vm_bo_find(vm, bo);
494 if (bo_va == NULL) {
495 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
496 return -EINVAL;
497 }
498
499 r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
484 if (r) 500 if (r)
485 return r; 501 return r;
486 } 502 }
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 03686fab842d..697add2cd4e3 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
1056 if (!radeon_check_pot_argument(radeon_vm_size)) { 1056 if (!radeon_check_pot_argument(radeon_vm_size)) {
1057 dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n", 1057 dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1058 radeon_vm_size); 1058 radeon_vm_size);
1059 radeon_vm_size = 4096; 1059 radeon_vm_size = 4;
1060 } 1060 }
1061 1061
1062 if (radeon_vm_size < 4) { 1062 if (radeon_vm_size < 1) {
1063 dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n", 1063 dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
1064 radeon_vm_size); 1064 radeon_vm_size);
1065 radeon_vm_size = 4096; 1065 radeon_vm_size = 4;
1066 } 1066 }
1067 1067
1068 /* 1068 /*
1069 * Max GPUVM size for Cayman, SI and CI are 40 bits. 1069 * Max GPUVM size for Cayman, SI and CI are 40 bits.
1070 */ 1070 */
1071 if (radeon_vm_size > 1024*1024) { 1071 if (radeon_vm_size > 1024) {
1072 dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n", 1072 dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1073 radeon_vm_size); 1073 radeon_vm_size);
1074 radeon_vm_size = 4096; 1074 radeon_vm_size = 4;
1075 } 1075 }
1076 1076
1077 /* defines number of bits in page table versus page directory, 1077 /* defines number of bits in page table versus page directory,
1078 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the 1078 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1079 * page table and the remaining bits are in the page directory */ 1079 * page table and the remaining bits are in the page directory */
1080 if (radeon_vm_block_size < 9) { 1080 if (radeon_vm_block_size < 9) {
1081 dev_warn(rdev->dev, "VM page table size (%d) to small\n", 1081 dev_warn(rdev->dev, "VM page table size (%d) too small\n",
1082 radeon_vm_block_size); 1082 radeon_vm_block_size);
1083 radeon_vm_block_size = 9; 1083 radeon_vm_block_size = 9;
1084 } 1084 }
1085 1085
1086 if (radeon_vm_block_size > 24 || 1086 if (radeon_vm_block_size > 24 ||
1087 radeon_vm_size < (1ull << radeon_vm_block_size)) { 1087 (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1088 dev_warn(rdev->dev, "VM page table size (%d) to large\n", 1088 dev_warn(rdev->dev, "VM page table size (%d) too large\n",
1089 radeon_vm_block_size); 1089 radeon_vm_block_size);
1090 radeon_vm_block_size = 9; 1090 radeon_vm_block_size = 9;
1091 } 1091 }
@@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
1238 /* Adjust VM size here. 1238 /* Adjust VM size here.
1239 * Max GPUVM size for cayman+ is 40 bits. 1239 * Max GPUVM size for cayman+ is 40 bits.
1240 */ 1240 */
1241 rdev->vm_manager.max_pfn = radeon_vm_size << 8; 1241 rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1242 1242
1243 /* Set asic functions */ 1243 /* Set asic functions */
1244 r = radeon_asic_init(rdev); 1244 r = radeon_asic_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 13896edcf0b6..bf25061c8ac4 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -366,7 +366,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
366 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 366 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
367 367
368 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); 368 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
369 radeon_fence_unref(&work->fence);
370 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); 369 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
371 queue_work(radeon_crtc->flip_queue, &work->unpin_work); 370 queue_work(radeon_crtc->flip_queue, &work->unpin_work);
372} 371}
@@ -386,51 +385,108 @@ static void radeon_flip_work_func(struct work_struct *__work)
386 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id]; 385 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
387 386
388 struct drm_crtc *crtc = &radeon_crtc->base; 387 struct drm_crtc *crtc = &radeon_crtc->base;
389 struct drm_framebuffer *fb = work->fb;
390
391 uint32_t tiling_flags, pitch_pixels;
392 uint64_t base;
393
394 unsigned long flags; 388 unsigned long flags;
395 int r; 389 int r;
396 390
397 down_read(&rdev->exclusive_lock); 391 down_read(&rdev->exclusive_lock);
398 while (work->fence) { 392 if (work->fence) {
399 r = radeon_fence_wait(work->fence, false); 393 r = radeon_fence_wait(work->fence, false);
400 if (r == -EDEADLK) { 394 if (r == -EDEADLK) {
401 up_read(&rdev->exclusive_lock); 395 up_read(&rdev->exclusive_lock);
402 r = radeon_gpu_reset(rdev); 396 r = radeon_gpu_reset(rdev);
403 down_read(&rdev->exclusive_lock); 397 down_read(&rdev->exclusive_lock);
404 } 398 }
399 if (r)
400 DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
405 401
406 if (r) { 402 /* We continue with the page flip even if we failed to wait on
407 DRM_ERROR("failed to wait on page flip fence (%d)!\n", 403 * the fence, otherwise the DRM core and userspace will be
408 r); 404 * confused about which BO the CRTC is scanning out
409 goto cleanup; 405 */
410 } else 406
411 radeon_fence_unref(&work->fence); 407 radeon_fence_unref(&work->fence);
412 } 408 }
413 409
410 /* We borrow the event spin lock for protecting flip_status */
411 spin_lock_irqsave(&crtc->dev->event_lock, flags);
412
413 /* set the proper interrupt */
414 radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
415
416 /* do the flip (mmio) */
417 radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
418
419 radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
420 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
421 up_read(&rdev->exclusive_lock);
422}
423
424static int radeon_crtc_page_flip(struct drm_crtc *crtc,
425 struct drm_framebuffer *fb,
426 struct drm_pending_vblank_event *event,
427 uint32_t page_flip_flags)
428{
429 struct drm_device *dev = crtc->dev;
430 struct radeon_device *rdev = dev->dev_private;
431 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
432 struct radeon_framebuffer *old_radeon_fb;
433 struct radeon_framebuffer *new_radeon_fb;
434 struct drm_gem_object *obj;
435 struct radeon_flip_work *work;
436 struct radeon_bo *new_rbo;
437 uint32_t tiling_flags, pitch_pixels;
438 uint64_t base;
439 unsigned long flags;
440 int r;
441
442 work = kzalloc(sizeof *work, GFP_KERNEL);
443 if (work == NULL)
444 return -ENOMEM;
445
446 INIT_WORK(&work->flip_work, radeon_flip_work_func);
447 INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
448
449 work->rdev = rdev;
450 work->crtc_id = radeon_crtc->crtc_id;
451 work->event = event;
452
453 /* schedule unpin of the old buffer */
454 old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
455 obj = old_radeon_fb->obj;
456
457 /* take a reference to the old object */
458 drm_gem_object_reference(obj);
459 work->old_rbo = gem_to_radeon_bo(obj);
460
461 new_radeon_fb = to_radeon_framebuffer(fb);
462 obj = new_radeon_fb->obj;
463 new_rbo = gem_to_radeon_bo(obj);
464
465 spin_lock(&new_rbo->tbo.bdev->fence_lock);
466 if (new_rbo->tbo.sync_obj)
467 work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
468 spin_unlock(&new_rbo->tbo.bdev->fence_lock);
469
414 /* pin the new buffer */ 470 /* pin the new buffer */
415 DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", 471 DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
416 work->old_rbo, work->new_rbo); 472 work->old_rbo, new_rbo);
417 473
418 r = radeon_bo_reserve(work->new_rbo, false); 474 r = radeon_bo_reserve(new_rbo, false);
419 if (unlikely(r != 0)) { 475 if (unlikely(r != 0)) {
420 DRM_ERROR("failed to reserve new rbo buffer before flip\n"); 476 DRM_ERROR("failed to reserve new rbo buffer before flip\n");
421 goto cleanup; 477 goto cleanup;
422 } 478 }
423 /* Only 27 bit offset for legacy CRTC */ 479 /* Only 27 bit offset for legacy CRTC */
424 r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM, 480 r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
425 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base); 481 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
426 if (unlikely(r != 0)) { 482 if (unlikely(r != 0)) {
427 radeon_bo_unreserve(work->new_rbo); 483 radeon_bo_unreserve(new_rbo);
428 r = -EINVAL; 484 r = -EINVAL;
429 DRM_ERROR("failed to pin new rbo buffer before flip\n"); 485 DRM_ERROR("failed to pin new rbo buffer before flip\n");
430 goto cleanup; 486 goto cleanup;
431 } 487 }
432 radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL); 488 radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
433 radeon_bo_unreserve(work->new_rbo); 489 radeon_bo_unreserve(new_rbo);
434 490
435 if (!ASIC_IS_AVIVO(rdev)) { 491 if (!ASIC_IS_AVIVO(rdev)) {
436 /* crtc offset is from display base addr not FB location */ 492 /* crtc offset is from display base addr not FB location */
@@ -467,6 +523,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
467 } 523 }
468 base &= ~7; 524 base &= ~7;
469 } 525 }
526 work->base = base;
470 527
471 r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id); 528 r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
472 if (r) { 529 if (r) {
@@ -477,100 +534,42 @@ static void radeon_flip_work_func(struct work_struct *__work)
477 /* We borrow the event spin lock for protecting flip_work */ 534 /* We borrow the event spin lock for protecting flip_work */
478 spin_lock_irqsave(&crtc->dev->event_lock, flags); 535 spin_lock_irqsave(&crtc->dev->event_lock, flags);
479 536
480 /* set the proper interrupt */ 537 if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
481 radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); 538 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
539 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
540 r = -EBUSY;
541 goto vblank_cleanup;
542 }
543 radeon_crtc->flip_status = RADEON_FLIP_PENDING;
544 radeon_crtc->flip_work = work;
482 545
483 /* do the flip (mmio) */ 546 /* update crtc fb */
484 radeon_page_flip(rdev, radeon_crtc->crtc_id, base); 547 crtc->primary->fb = fb;
485 548
486 radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
487 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 549 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
488 up_read(&rdev->exclusive_lock);
489 550
490 return; 551 queue_work(radeon_crtc->flip_queue, &work->flip_work);
552 return 0;
553
554vblank_cleanup:
555 drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
491 556
492pflip_cleanup: 557pflip_cleanup:
493 if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) { 558 if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
494 DRM_ERROR("failed to reserve new rbo in error path\n"); 559 DRM_ERROR("failed to reserve new rbo in error path\n");
495 goto cleanup; 560 goto cleanup;
496 } 561 }
497 if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) { 562 if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
498 DRM_ERROR("failed to unpin new rbo in error path\n"); 563 DRM_ERROR("failed to unpin new rbo in error path\n");
499 } 564 }
500 radeon_bo_unreserve(work->new_rbo); 565 radeon_bo_unreserve(new_rbo);
501 566
502cleanup: 567cleanup:
503 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 568 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
504 radeon_fence_unref(&work->fence); 569 radeon_fence_unref(&work->fence);
505 kfree(work); 570 kfree(work);
506 up_read(&rdev->exclusive_lock);
507}
508
509static int radeon_crtc_page_flip(struct drm_crtc *crtc,
510 struct drm_framebuffer *fb,
511 struct drm_pending_vblank_event *event,
512 uint32_t page_flip_flags)
513{
514 struct drm_device *dev = crtc->dev;
515 struct radeon_device *rdev = dev->dev_private;
516 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
517 struct radeon_framebuffer *old_radeon_fb;
518 struct radeon_framebuffer *new_radeon_fb;
519 struct drm_gem_object *obj;
520 struct radeon_flip_work *work;
521 unsigned long flags;
522
523 work = kzalloc(sizeof *work, GFP_KERNEL);
524 if (work == NULL)
525 return -ENOMEM;
526
527 INIT_WORK(&work->flip_work, radeon_flip_work_func);
528 INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
529
530 work->rdev = rdev;
531 work->crtc_id = radeon_crtc->crtc_id;
532 work->fb = fb;
533 work->event = event;
534
535 /* schedule unpin of the old buffer */
536 old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
537 obj = old_radeon_fb->obj;
538
539 /* take a reference to the old object */
540 drm_gem_object_reference(obj);
541 work->old_rbo = gem_to_radeon_bo(obj);
542
543 new_radeon_fb = to_radeon_framebuffer(fb);
544 obj = new_radeon_fb->obj;
545 work->new_rbo = gem_to_radeon_bo(obj);
546
547 spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
548 if (work->new_rbo->tbo.sync_obj)
549 work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
550 spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
551
552 /* We borrow the event spin lock for protecting flip_work */
553 spin_lock_irqsave(&crtc->dev->event_lock, flags);
554 571
555 if (radeon_crtc->flip_status != RADEON_FLIP_NONE) { 572 return r;
556 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
557 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
558 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
559 radeon_fence_unref(&work->fence);
560 kfree(work);
561 return -EBUSY;
562 }
563 radeon_crtc->flip_status = RADEON_FLIP_PENDING;
564 radeon_crtc->flip_work = work;
565
566 /* update crtc fb */
567 crtc->primary->fb = fb;
568
569 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
570
571 queue_work(radeon_crtc->flip_queue, &work->flip_work);
572
573 return 0;
574} 573}
575 574
576static int 575static int
@@ -830,6 +829,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
830 struct radeon_device *rdev = dev->dev_private; 829 struct radeon_device *rdev = dev->dev_private;
831 int ret = 0; 830 int ret = 0;
832 831
832 /* don't leak the edid if we already fetched it in detect() */
833 if (radeon_connector->edid)
834 goto got_edid;
835
833 /* on hw with routers, select right port */ 836 /* on hw with routers, select right port */
834 if (radeon_connector->router.ddc_valid) 837 if (radeon_connector->router.ddc_valid)
835 radeon_router_select_ddc_port(radeon_connector); 838 radeon_router_select_ddc_port(radeon_connector);
@@ -868,6 +871,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
868 radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); 871 radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
869 } 872 }
870 if (radeon_connector->edid) { 873 if (radeon_connector->edid) {
874got_edid:
871 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); 875 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
872 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); 876 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
873 drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid); 877 drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb1421369e3a..e9e361084249 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -173,7 +173,7 @@ int radeon_dpm = -1;
173int radeon_aspm = -1; 173int radeon_aspm = -1;
174int radeon_runtime_pm = -1; 174int radeon_runtime_pm = -1;
175int radeon_hard_reset = 0; 175int radeon_hard_reset = 0;
176int radeon_vm_size = 4096; 176int radeon_vm_size = 4;
177int radeon_vm_block_size = 9; 177int radeon_vm_block_size = 9;
178int radeon_deep_color = 0; 178int radeon_deep_color = 0;
179 179
@@ -243,7 +243,7 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
243MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); 243MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
244module_param_named(hard_reset, radeon_hard_reset, int, 0444); 244module_param_named(hard_reset, radeon_hard_reset, int, 0444);
245 245
246MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)"); 246MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
247module_param_named(vm_size, radeon_vm_size, int, 0444); 247module_param_named(vm_size, radeon_vm_size, int, 0444);
248 248
249MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)"); 249MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 35d931881b4b..d25ae6acfd5a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
579 /* new gpu have virtual address space support */ 579 /* new gpu have virtual address space support */
580 if (rdev->family >= CHIP_CAYMAN) { 580 if (rdev->family >= CHIP_CAYMAN) {
581 struct radeon_fpriv *fpriv; 581 struct radeon_fpriv *fpriv;
582 struct radeon_bo_va *bo_va; 582 struct radeon_vm *vm;
583 int r; 583 int r;
584 584
585 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); 585 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
587 return -ENOMEM; 587 return -ENOMEM;
588 } 588 }
589 589
590 r = radeon_vm_init(rdev, &fpriv->vm); 590 vm = &fpriv->vm;
591 r = radeon_vm_init(rdev, vm);
591 if (r) { 592 if (r) {
592 kfree(fpriv); 593 kfree(fpriv);
593 return r; 594 return r;
@@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
596 if (rdev->accel_working) { 597 if (rdev->accel_working) {
597 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 598 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
598 if (r) { 599 if (r) {
599 radeon_vm_fini(rdev, &fpriv->vm); 600 radeon_vm_fini(rdev, vm);
600 kfree(fpriv); 601 kfree(fpriv);
601 return r; 602 return r;
602 } 603 }
603 604
604 /* map the ib pool buffer read only into 605 /* map the ib pool buffer read only into
605 * virtual address space */ 606 * virtual address space */
606 bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, 607 vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
607 rdev->ring_tmp_bo.bo); 608 rdev->ring_tmp_bo.bo);
608 r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, 609 r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
610 RADEON_VA_IB_OFFSET,
609 RADEON_VM_PAGE_READABLE | 611 RADEON_VM_PAGE_READABLE |
610 RADEON_VM_PAGE_SNOOPED); 612 RADEON_VM_PAGE_SNOOPED);
611 613
612 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 614 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
613 if (r) { 615 if (r) {
614 radeon_vm_fini(rdev, &fpriv->vm); 616 radeon_vm_fini(rdev, vm);
615 kfree(fpriv); 617 kfree(fpriv);
616 return r; 618 return r;
617 } 619 }
@@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
640 /* new gpu have virtual address space support */ 642 /* new gpu have virtual address space support */
641 if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) { 643 if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
642 struct radeon_fpriv *fpriv = file_priv->driver_priv; 644 struct radeon_fpriv *fpriv = file_priv->driver_priv;
643 struct radeon_bo_va *bo_va; 645 struct radeon_vm *vm = &fpriv->vm;
644 int r; 646 int r;
645 647
646 if (rdev->accel_working) { 648 if (rdev->accel_working) {
647 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 649 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
648 if (!r) { 650 if (!r) {
649 bo_va = radeon_vm_bo_find(&fpriv->vm, 651 if (vm->ib_bo_va)
650 rdev->ring_tmp_bo.bo); 652 radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
651 if (bo_va)
652 radeon_vm_bo_rmv(rdev, bo_va);
653 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 653 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
654 } 654 }
655 } 655 }
656 656
657 radeon_vm_fini(rdev, &fpriv->vm); 657 radeon_vm_fini(rdev, vm);
658 kfree(fpriv); 658 kfree(fpriv);
659 file_priv->driver_priv = NULL; 659 file_priv->driver_priv = NULL;
660 } 660 }
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index eecff6bbd341..725d3669014f 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
332 bo_va->ref_count = 1; 332 bo_va->ref_count = 1;
333 INIT_LIST_HEAD(&bo_va->bo_list); 333 INIT_LIST_HEAD(&bo_va->bo_list);
334 INIT_LIST_HEAD(&bo_va->vm_list); 334 INIT_LIST_HEAD(&bo_va->vm_list);
335 INIT_LIST_HEAD(&bo_va->vm_status);
335 336
336 mutex_lock(&vm->mutex); 337 mutex_lock(&vm->mutex);
337 list_add(&bo_va->vm_list, &vm->va); 338 list_add(&bo_va->vm_list, &vm->va);
@@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
468 head = &tmp->vm_list; 469 head = &tmp->vm_list;
469 } 470 }
470 471
472 if (bo_va->soffset) {
473 /* add a clone of the bo_va to clear the old address */
474 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
475 if (!tmp) {
476 mutex_unlock(&vm->mutex);
477 return -ENOMEM;
478 }
479 tmp->soffset = bo_va->soffset;
480 tmp->eoffset = bo_va->eoffset;
481 tmp->vm = vm;
482 list_add(&tmp->vm_status, &vm->freed);
483 }
484
471 bo_va->soffset = soffset; 485 bo_va->soffset = soffset;
472 bo_va->eoffset = eoffset; 486 bo_va->eoffset = eoffset;
473 bo_va->flags = flags; 487 bo_va->flags = flags;
@@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
823 * Object have to be reserved and mutex must be locked! 837 * Object have to be reserved and mutex must be locked!
824 */ 838 */
825int radeon_vm_bo_update(struct radeon_device *rdev, 839int radeon_vm_bo_update(struct radeon_device *rdev,
826 struct radeon_vm *vm, 840 struct radeon_bo_va *bo_va,
827 struct radeon_bo *bo,
828 struct ttm_mem_reg *mem) 841 struct ttm_mem_reg *mem)
829{ 842{
843 struct radeon_vm *vm = bo_va->vm;
830 struct radeon_ib ib; 844 struct radeon_ib ib;
831 struct radeon_bo_va *bo_va;
832 unsigned nptes, ndw; 845 unsigned nptes, ndw;
833 uint64_t addr; 846 uint64_t addr;
834 int r; 847 int r;
835 848
836 bo_va = radeon_vm_bo_find(vm, bo);
837 if (bo_va == NULL) {
838 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
839 return -EINVAL;
840 }
841 849
842 if (!bo_va->soffset) { 850 if (!bo_va->soffset) {
843 dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n", 851 dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
844 bo, vm); 852 bo_va->bo, vm);
845 return -EINVAL; 853 return -EINVAL;
846 } 854 }
847 855
@@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
868 876
869 trace_radeon_vm_bo_update(bo_va); 877 trace_radeon_vm_bo_update(bo_va);
870 878
871 nptes = radeon_bo_ngpu_pages(bo); 879 nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
872 880
873 /* padding, etc. */ 881 /* padding, etc. */
874 ndw = 64; 882 ndw = 64;
@@ -911,33 +919,61 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
911} 919}
912 920
913/** 921/**
922 * radeon_vm_clear_freed - clear freed BOs in the PT
923 *
924 * @rdev: radeon_device pointer
925 * @vm: requested vm
926 *
927 * Make sure all freed BOs are cleared in the PT.
928 * Returns 0 for success.
929 *
930 * PTs have to be reserved and mutex must be locked!
931 */
932int radeon_vm_clear_freed(struct radeon_device *rdev,
933 struct radeon_vm *vm)
934{
935 struct radeon_bo_va *bo_va, *tmp;
936 int r;
937
938 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
939 list_del(&bo_va->vm_status);
940 r = radeon_vm_bo_update(rdev, bo_va, NULL);
941 kfree(bo_va);
942 if (r)
943 return r;
944 }
945 return 0;
946
947}
948
949/**
914 * radeon_vm_bo_rmv - remove a bo to a specific vm 950 * radeon_vm_bo_rmv - remove a bo to a specific vm
915 * 951 *
916 * @rdev: radeon_device pointer 952 * @rdev: radeon_device pointer
917 * @bo_va: requested bo_va 953 * @bo_va: requested bo_va
918 * 954 *
919 * Remove @bo_va->bo from the requested vm (cayman+). 955 * Remove @bo_va->bo from the requested vm (cayman+).
920 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
921 * remove the ptes for @bo_va in the page table.
922 * Returns 0 for success.
923 * 956 *
924 * Object have to be reserved! 957 * Object have to be reserved!
925 */ 958 */
926int radeon_vm_bo_rmv(struct radeon_device *rdev, 959void radeon_vm_bo_rmv(struct radeon_device *rdev,
927 struct radeon_bo_va *bo_va) 960 struct radeon_bo_va *bo_va)
928{ 961{
929 int r = 0; 962 struct radeon_vm *vm = bo_va->vm;
930 963
931 mutex_lock(&bo_va->vm->mutex); 964 list_del(&bo_va->bo_list);
932 if (bo_va->soffset)
933 r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
934 965
966 mutex_lock(&vm->mutex);
935 list_del(&bo_va->vm_list); 967 list_del(&bo_va->vm_list);
936 mutex_unlock(&bo_va->vm->mutex);
937 list_del(&bo_va->bo_list);
938 968
939 kfree(bo_va); 969 if (bo_va->soffset) {
940 return r; 970 bo_va->bo = NULL;
971 list_add(&bo_va->vm_status, &vm->freed);
972 } else {
973 kfree(bo_va);
974 }
975
976 mutex_unlock(&vm->mutex);
941} 977}
942 978
943/** 979/**
@@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
975 int r; 1011 int r;
976 1012
977 vm->id = 0; 1013 vm->id = 0;
1014 vm->ib_bo_va = NULL;
978 vm->fence = NULL; 1015 vm->fence = NULL;
979 vm->last_flush = NULL; 1016 vm->last_flush = NULL;
980 vm->last_id_use = NULL; 1017 vm->last_id_use = NULL;
981 mutex_init(&vm->mutex); 1018 mutex_init(&vm->mutex);
982 INIT_LIST_HEAD(&vm->va); 1019 INIT_LIST_HEAD(&vm->va);
1020 INIT_LIST_HEAD(&vm->freed);
983 1021
984 pd_size = radeon_vm_directory_size(rdev); 1022 pd_size = radeon_vm_directory_size(rdev);
985 pd_entries = radeon_vm_num_pdes(rdev); 1023 pd_entries = radeon_vm_num_pdes(rdev);
@@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1034 kfree(bo_va); 1072 kfree(bo_va);
1035 } 1073 }
1036 } 1074 }
1037 1075 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
1076 kfree(bo_va);
1038 1077
1039 for (i = 0; i < radeon_vm_num_pdes(rdev); i++) 1078 for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
1040 radeon_bo_unref(&vm->page_tables[i].bo); 1079 radeon_bo_unref(&vm->page_tables[i].bo);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 237dd29d9f1c..3e21e869015f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
406 for (i = 0; i < rdev->num_crtc; i++) { 406 for (i = 0; i < rdev->num_crtc; i++) {
407 if (save->crtc_enabled[i]) { 407 if (save->crtc_enabled[i]) {
408 tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]); 408 tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
409 if ((tmp & 0x3) != 0) { 409 if ((tmp & 0x7) != 3) {
410 tmp &= ~0x3; 410 tmp &= ~0x7;
411 tmp |= 0x3;
411 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); 412 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
412 } 413 }
413 tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); 414 tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index eba0225259a4..9e854fd016da 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
6103 tmp = RREG32(IH_RB_CNTL); 6103 tmp = RREG32(IH_RB_CNTL);
6104 tmp |= IH_WPTR_OVERFLOW_CLEAR; 6104 tmp |= IH_WPTR_OVERFLOW_CLEAR;
6105 WREG32(IH_RB_CNTL, tmp); 6105 WREG32(IH_RB_CNTL, tmp);
6106 wptr &= ~RB_OVERFLOW;
6106 } 6107 }
6107 return (wptr & rdev->ih.ptr_mask); 6108 return (wptr & rdev->ih.ptr_mask);
6108} 6109}
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 20da6ff183df..32e50be9c4ac 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
1874 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 1874 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
1875 pi->at[i] = TRINITY_AT_DFLT; 1875 pi->at[i] = TRINITY_AT_DFLT;
1876 1876
1877 /* There are stability issues reported on latops with 1877 /* There are stability issues reported on with
1878 * bapm installed when switching between AC and battery 1878 * bapm enabled when switching between AC and battery
1879 * power. At the same time, some desktop boards hang 1879 * power. At the same time, some MSI boards hang
1880 * if it's not enabled and dpm is enabled. 1880 * if it's not enabled and dpm is enabled. Just enable
1881 * it for MSI boards right now.
1881 */ 1882 */
1882 if (rdev->flags & RADEON_IS_MOBILITY) 1883 if (rdev->pdev->subsystem_vendor == 0x1462)
1883 pi->enable_bapm = false;
1884 else
1885 pi->enable_bapm = true; 1884 pi->enable_bapm = true;
1885 else
1886 pi->enable_bapm = false;
1886 pi->enable_nbps_policy = true; 1887 pi->enable_nbps_policy = true;
1887 pi->enable_sclk_ds = true; 1888 pi->enable_sclk_ds = true;
1888 pi->enable_gfx_power_gating = true; 1889 pi->enable_gfx_power_gating = true;
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index eaaa3d843b80..23b2ce294c4c 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -246,8 +246,8 @@ void hv_fcopy_onchannelcallback(void *context)
246 /* 246 /*
247 * Send the information to the user-level daemon. 247 * Send the information to the user-level daemon.
248 */ 248 */
249 fcopy_send_data();
250 schedule_delayed_work(&fcopy_work, 5*HZ); 249 schedule_delayed_work(&fcopy_work, 5*HZ);
250 fcopy_send_data();
251 return; 251 return;
252 } 252 }
253 icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; 253 icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 0f4dea5ccf17..9ee3913850d6 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
515 return -EINVAL; 515 return -EINVAL;
516 516
517 temp = DIV_ROUND_CLOSEST(temp, 1000); 517 temp = DIV_ROUND_CLOSEST(temp, 1000);
518 temp = clamp_val(temp, 0, 255); 518 temp = clamp_val(temp, -128, 127);
519 519
520 mutex_lock(&data->lock); 520 mutex_lock(&data->lock);
521 data->temp_min[attr->index] = temp; 521 data->temp_min[attr->index] = temp;
@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
549 return -EINVAL; 549 return -EINVAL;
550 550
551 temp = DIV_ROUND_CLOSEST(temp, 1000); 551 temp = DIV_ROUND_CLOSEST(temp, 1000);
552 temp = clamp_val(temp, 0, 255); 552 temp = clamp_val(temp, -128, 127);
553 553
554 mutex_lock(&data->lock); 554 mutex_lock(&data->lock);
555 data->temp_max[attr->index] = temp; 555 data->temp_max[attr->index] = temp;
@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
826 return -EINVAL; 826 return -EINVAL;
827 827
828 temp = DIV_ROUND_CLOSEST(temp, 1000); 828 temp = DIV_ROUND_CLOSEST(temp, 1000);
829 temp = clamp_val(temp, 0, 255); 829 temp = clamp_val(temp, -128, 127);
830 830
831 mutex_lock(&data->lock); 831 mutex_lock(&data->lock);
832 data->pwm_tmin[attr->index] = temp; 832 data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index afd31042b452..d14ab3c45daa 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
194 struct device_attribute *devattr, 194 struct device_attribute *devattr,
195 char *buf) 195 char *buf)
196{ 196{
197 return sprintf(buf, "da9052-hwmon\n"); 197 return sprintf(buf, "da9052\n");
198} 198}
199 199
200static ssize_t show_label(struct device *dev, 200static ssize_t show_label(struct device *dev,
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 73b3865f1207..35eb7738d711 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
204 struct device_attribute *devattr, 204 struct device_attribute *devattr,
205 char *buf) 205 char *buf)
206{ 206{
207 return sprintf(buf, "da9055-hwmon\n"); 207 return sprintf(buf, "da9055\n");
208} 208}
209 209
210static ssize_t show_label(struct device *dev, 210static ssize_t show_label(struct device *dev,
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index efee4c59239f..34b9a601ad07 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
86 */ 86 */
87static inline s8 TEMP_TO_REG(int val) 87static inline s8 TEMP_TO_REG(int val)
88{ 88{
89 return clamp_val(SCALE(val, 1, 1000), -128000, 127000); 89 return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
90} 90}
91 91
92static inline int TEMP_FROM_REG(s8 val) 92static inline int TEMP_FROM_REG(s8 val)
@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
384 err = kstrtoul(buf, 10, &val); 384 err = kstrtoul(buf, 10, &val);
385 if (err) 385 if (err)
386 return err; 386 return err;
387 if (val > 255)
388 return -EINVAL;
387 389
388 data->vrm = val; 390 data->vrm = val;
389 return count; 391 return count;
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 8fb46aab2d87..a04c49f2a011 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -416,6 +416,7 @@ config BLK_DEV_CY82C693
416 416
417config BLK_DEV_CS5520 417config BLK_DEV_CS5520
418 tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)" 418 tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
419 depends on X86_32 || COMPILE_TEST
419 select BLK_DEV_IDEDMA_PCI 420 select BLK_DEV_IDEDMA_PCI
420 help 421 help
421 Include support for PIO tuning and virtual DMA on the Cyrix MediaGX 422 Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@ -426,6 +427,7 @@ config BLK_DEV_CS5520
426 427
427config BLK_DEV_CS5530 428config BLK_DEV_CS5530
428 tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support" 429 tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
430 depends on X86_32 || COMPILE_TEST
429 select BLK_DEV_IDEDMA_PCI 431 select BLK_DEV_IDEDMA_PCI
430 help 432 help
431 Include support for UDMA on the Cyrix MediaGX 5530 chipset. This 433 Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
@@ -435,7 +437,7 @@ config BLK_DEV_CS5530
435 437
436config BLK_DEV_CS5535 438config BLK_DEV_CS5535
437 tristate "AMD CS5535 chipset support" 439 tristate "AMD CS5535 chipset support"
438 depends on X86 && !X86_64 440 depends on X86_32
439 select BLK_DEV_IDEDMA_PCI 441 select BLK_DEV_IDEDMA_PCI
440 help 442 help
441 Include support for UDMA on the NSC/AMD CS5535 companion chipset. 443 Include support for UDMA on the NSC/AMD CS5535 companion chipset.
@@ -486,6 +488,7 @@ config BLK_DEV_JMICRON
486 488
487config BLK_DEV_SC1200 489config BLK_DEV_SC1200
488 tristate "National SCx200 chipset support" 490 tristate "National SCx200 chipset support"
491 depends on X86_32 || COMPILE_TEST
489 select BLK_DEV_IDEDMA_PCI 492 select BLK_DEV_IDEDMA_PCI
490 help 493 help
491 This driver adds support for the on-board IDE controller on the 494 This driver adds support for the on-board IDE controller on the
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 2a744a91370e..a3d3b1733c49 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -853,8 +853,9 @@ static int init_irq (ide_hwif_t *hwif)
853 if (irq_handler == NULL) 853 if (irq_handler == NULL)
854 irq_handler = ide_intr; 854 irq_handler = ide_intr;
855 855
856 if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif)) 856 if (!host->get_lock)
857 goto out_up; 857 if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
858 goto out_up;
858 859
859#if !defined(__mc68000__) 860#if !defined(__mc68000__)
860 printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name, 861 printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
@@ -1533,7 +1534,8 @@ static void ide_unregister(ide_hwif_t *hwif)
1533 1534
1534 ide_proc_unregister_port(hwif); 1535 ide_proc_unregister_port(hwif);
1535 1536
1536 free_irq(hwif->irq, hwif); 1537 if (!hwif->host->get_lock)
1538 free_irq(hwif->irq, hwif);
1537 1539
1538 device_unregister(hwif->portdev); 1540 device_unregister(hwif->portdev);
1539 device_unregister(&hwif->gendev); 1541 device_unregister(&hwif->gendev);
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index a7e68c81f89d..a077cc86421b 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -68,13 +68,13 @@
68/* Defaults values */ 68/* Defaults values */
69#define BMA180_DEF_PMODE 0 69#define BMA180_DEF_PMODE 0
70#define BMA180_DEF_BW 20 70#define BMA180_DEF_BW 20
71#define BMA180_DEF_SCALE 250 71#define BMA180_DEF_SCALE 2452
72 72
73/* Available values for sysfs */ 73/* Available values for sysfs */
74#define BMA180_FLP_FREQ_AVAILABLE \ 74#define BMA180_FLP_FREQ_AVAILABLE \
75 "10 20 40 75 150 300" 75 "10 20 40 75 150 300"
76#define BMA180_SCALE_AVAILABLE \ 76#define BMA180_SCALE_AVAILABLE \
77 "0.000130 0.000190 0.000250 0.000380 0.000500 0.000990 0.001980" 77 "0.001275 0.001863 0.002452 0.003727 0.004903 0.009709 0.019417"
78 78
79struct bma180_data { 79struct bma180_data {
80 struct i2c_client *client; 80 struct i2c_client *client;
@@ -94,7 +94,7 @@ enum bma180_axis {
94}; 94};
95 95
96static int bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */ 96static int bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
97static int scale_table[] = { 130, 190, 250, 380, 500, 990, 1980 }; 97static int scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
98 98
99static int bma180_get_acc_reg(struct bma180_data *data, enum bma180_axis axis) 99static int bma180_get_acc_reg(struct bma180_data *data, enum bma180_axis axis)
100{ 100{
@@ -376,6 +376,8 @@ static int bma180_write_raw(struct iio_dev *indio_dev,
376 mutex_unlock(&data->mutex); 376 mutex_unlock(&data->mutex);
377 return ret; 377 return ret;
378 case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: 378 case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
379 if (val2)
380 return -EINVAL;
379 mutex_lock(&data->mutex); 381 mutex_lock(&data->mutex);
380 ret = bma180_set_bw(data, val); 382 ret = bma180_set_bw(data, val);
381 mutex_unlock(&data->mutex); 383 mutex_unlock(&data->mutex);
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 17aeea170566..2a5fa9a436e5 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -111,8 +111,14 @@ static const int mma8452_samp_freq[8][2] = {
111 {6, 250000}, {1, 560000} 111 {6, 250000}, {1, 560000}
112}; 112};
113 113
114/*
115 * Hardware has fullscale of -2G, -4G, -8G corresponding to raw value -2048
116 * The userspace interface uses m/s^2 and we declare micro units
117 * So scale factor is given by:
118 * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
119 */
114static const int mma8452_scales[3][2] = { 120static const int mma8452_scales[3][2] = {
115 {0, 977}, {0, 1953}, {0, 3906} 121 {0, 9577}, {0, 19154}, {0, 38307}
116}; 122};
117 123
118static ssize_t mma8452_show_samp_freq_avail(struct device *dev, 124static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 36b1ae92e239..9f1a14009901 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -966,7 +966,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
966 966
967 /* Now we have the two masks, work from least sig and build up sizes */ 967 /* Now we have the two masks, work from least sig and build up sizes */
968 for_each_set_bit(out_ind, 968 for_each_set_bit(out_ind,
969 indio_dev->active_scan_mask, 969 buffer->scan_mask,
970 indio_dev->masklength) { 970 indio_dev->masklength) {
971 in_ind = find_next_bit(indio_dev->active_scan_mask, 971 in_ind = find_next_bit(indio_dev->active_scan_mask,
972 indio_dev->masklength, 972 indio_dev->masklength,
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 258a973a1fb8..bfbf4d419f41 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -345,6 +345,9 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
345 &indio_dev->event_interface->dev_attr_list); 345 &indio_dev->event_interface->dev_attr_list);
346 kfree(postfix); 346 kfree(postfix);
347 347
348 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
349 continue;
350
348 if (ret) 351 if (ret)
349 return ret; 352 return ret;
350 353
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 5e153f6d4b48..768a0fb67dd6 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -432,8 +432,17 @@ static void arp_failure_discard(void *handle, struct sk_buff *skb)
432 */ 432 */
433static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) 433static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
434{ 434{
435 struct c4iw_ep *ep = handle;
436
435 printk(KERN_ERR MOD "ARP failure duing connect\n"); 437 printk(KERN_ERR MOD "ARP failure duing connect\n");
436 kfree_skb(skb); 438 kfree_skb(skb);
439 connect_reply_upcall(ep, -EHOSTUNREACH);
440 state_set(&ep->com, DEAD);
441 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
442 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
443 dst_release(ep->dst);
444 cxgb4_l2t_release(ep->l2t);
445 c4iw_put_ep(&ep->com);
437} 446}
438 447
439/* 448/*
@@ -658,7 +667,7 @@ static int send_connect(struct c4iw_ep *ep)
658 opt2 |= T5_OPT_2_VALID; 667 opt2 |= T5_OPT_2_VALID;
659 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); 668 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
660 } 669 }
661 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); 670 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
662 671
663 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { 672 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
664 if (ep->com.remote_addr.ss_family == AF_INET) { 673 if (ep->com.remote_addr.ss_family == AF_INET) {
@@ -2180,7 +2189,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
2180 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid); 2189 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
2181 BUG_ON(skb_cloned(skb)); 2190 BUG_ON(skb_cloned(skb));
2182 skb_trim(skb, sizeof(struct cpl_tid_release)); 2191 skb_trim(skb, sizeof(struct cpl_tid_release));
2183 skb_get(skb);
2184 release_tid(&dev->rdev, hwtid, skb); 2192 release_tid(&dev->rdev, hwtid, skb);
2185 return; 2193 return;
2186} 2194}
@@ -3917,7 +3925,7 @@ int __init c4iw_cm_init(void)
3917 return 0; 3925 return 0;
3918} 3926}
3919 3927
3920void __exit c4iw_cm_term(void) 3928void c4iw_cm_term(void)
3921{ 3929{
3922 WARN_ON(!list_empty(&timeout_list)); 3930 WARN_ON(!list_empty(&timeout_list));
3923 flush_workqueue(workq); 3931 flush_workqueue(workq);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index dd93aadc996e..7db82b24302b 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -696,6 +696,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
696 pr_err(MOD "error allocating status page\n"); 696 pr_err(MOD "error allocating status page\n");
697 goto err4; 697 goto err4;
698 } 698 }
699 rdev->status_page->db_off = 0;
699 return 0; 700 return 0;
700err4: 701err4:
701 c4iw_rqtpool_destroy(rdev); 702 c4iw_rqtpool_destroy(rdev);
@@ -729,7 +730,6 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
729 if (ctx->dev->rdev.oc_mw_kva) 730 if (ctx->dev->rdev.oc_mw_kva)
730 iounmap(ctx->dev->rdev.oc_mw_kva); 731 iounmap(ctx->dev->rdev.oc_mw_kva);
731 ib_dealloc_device(&ctx->dev->ibdev); 732 ib_dealloc_device(&ctx->dev->ibdev);
732 iwpm_exit(RDMA_NL_C4IW);
733 ctx->dev = NULL; 733 ctx->dev = NULL;
734} 734}
735 735
@@ -826,12 +826,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
826 setup_debugfs(devp); 826 setup_debugfs(devp);
827 } 827 }
828 828
829 ret = iwpm_init(RDMA_NL_C4IW);
830 if (ret) {
831 pr_err("port mapper initialization failed with %d\n", ret);
832 ib_dealloc_device(&devp->ibdev);
833 return ERR_PTR(ret);
834 }
835 829
836 return devp; 830 return devp;
837} 831}
@@ -1332,6 +1326,15 @@ static int __init c4iw_init_module(void)
1332 pr_err("%s[%u]: Failed to add netlink callback\n" 1326 pr_err("%s[%u]: Failed to add netlink callback\n"
1333 , __func__, __LINE__); 1327 , __func__, __LINE__);
1334 1328
1329 err = iwpm_init(RDMA_NL_C4IW);
1330 if (err) {
1331 pr_err("port mapper initialization failed with %d\n", err);
1332 ibnl_remove_client(RDMA_NL_C4IW);
1333 c4iw_cm_term();
1334 debugfs_remove_recursive(c4iw_debugfs_root);
1335 return err;
1336 }
1337
1335 cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); 1338 cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
1336 1339
1337 return 0; 1340 return 0;
@@ -1349,6 +1352,7 @@ static void __exit c4iw_exit_module(void)
1349 } 1352 }
1350 mutex_unlock(&dev_mutex); 1353 mutex_unlock(&dev_mutex);
1351 cxgb4_unregister_uld(CXGB4_ULD_RDMA); 1354 cxgb4_unregister_uld(CXGB4_ULD_RDMA);
1355 iwpm_exit(RDMA_NL_C4IW);
1352 ibnl_remove_client(RDMA_NL_C4IW); 1356 ibnl_remove_client(RDMA_NL_C4IW);
1353 c4iw_cm_term(); 1357 c4iw_cm_term();
1354 debugfs_remove_recursive(c4iw_debugfs_root); 1358 debugfs_remove_recursive(c4iw_debugfs_root);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 125bc5d1e175..361fff7a0742 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -908,7 +908,7 @@ int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
908int c4iw_register_device(struct c4iw_dev *dev); 908int c4iw_register_device(struct c4iw_dev *dev);
909void c4iw_unregister_device(struct c4iw_dev *dev); 909void c4iw_unregister_device(struct c4iw_dev *dev);
910int __init c4iw_cm_init(void); 910int __init c4iw_cm_init(void);
911void __exit c4iw_cm_term(void); 911void c4iw_cm_term(void);
912void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, 912void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
913 struct c4iw_dev_ucontext *uctx); 913 struct c4iw_dev_ucontext *uctx);
914void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, 914void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d13ddf1c0033..bbbcf389272c 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -675,7 +675,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
675 int err; 675 int err;
676 676
677 uuari = &dev->mdev.priv.uuari; 677 uuari = &dev->mdev.priv.uuari;
678 if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN) 678 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
679 return -EINVAL; 679 return -EINVAL;
680 680
681 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) 681 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1c4c0db05550..29ca0bb4f561 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
257} 257}
258 258
259static int input_get_disposition(struct input_dev *dev, 259static int input_get_disposition(struct input_dev *dev,
260 unsigned int type, unsigned int code, int value) 260 unsigned int type, unsigned int code, int *pval)
261{ 261{
262 int disposition = INPUT_IGNORE_EVENT; 262 int disposition = INPUT_IGNORE_EVENT;
263 int value = *pval;
263 264
264 switch (type) { 265 switch (type) {
265 266
@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
357 break; 358 break;
358 } 359 }
359 360
361 *pval = value;
360 return disposition; 362 return disposition;
361} 363}
362 364
@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
365{ 367{
366 int disposition; 368 int disposition;
367 369
368 disposition = input_get_disposition(dev, type, code, value); 370 disposition = input_get_disposition(dev, type, code, &value);
369 371
370 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) 372 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
371 dev->event(dev, type, code, value); 373 dev->event(dev, type, code, value);
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index 758b48731415..de7be4f03d91 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -215,6 +215,7 @@ static int keyscan_probe(struct platform_device *pdev)
215 return 0; 215 return 0;
216} 216}
217 217
218#ifdef CONFIG_PM_SLEEP
218static int keyscan_suspend(struct device *dev) 219static int keyscan_suspend(struct device *dev)
219{ 220{
220 struct platform_device *pdev = to_platform_device(dev); 221 struct platform_device *pdev = to_platform_device(dev);
@@ -249,6 +250,7 @@ static int keyscan_resume(struct device *dev)
249 mutex_unlock(&input->mutex); 250 mutex_unlock(&input->mutex);
250 return retval; 251 return retval;
251} 252}
253#endif
252 254
253static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume); 255static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
254 256
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index e4104f9b2e6d..fed5102e1802 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -213,7 +213,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
213 213
214module_platform_driver(sirfsoc_pwrc_driver); 214module_platform_driver(sirfsoc_pwrc_driver);
215 215
216MODULE_LICENSE("GPLv2"); 216MODULE_LICENSE("GPL v2");
217MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>"); 217MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
218MODULE_DESCRIPTION("CSR Prima2 PWRC Driver"); 218MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
219MODULE_ALIAS("platform:sirfsoc-pwrc"); 219MODULE_ALIAS("platform:sirfsoc-pwrc");
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ec772d962f06..ef9e0b8a9aa7 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -132,7 +132,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
132 1232, 5710, 1156, 4696 132 1232, 5710, 1156, 4696
133 }, 133 },
134 { 134 {
135 (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL}, 135 (const char * const []){"LEN0034", "LEN0036", "LEN2002",
136 "LEN2004", NULL},
136 1024, 5112, 2024, 4832 137 1024, 5112, 2024, 4832
137 }, 138 },
138 { 139 {
@@ -168,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
168 "LEN0049", 169 "LEN0049",
169 "LEN2000", 170 "LEN2000",
170 "LEN2001", /* Edge E431 */ 171 "LEN2001", /* Edge E431 */
171 "LEN2002", 172 "LEN2002", /* Edge E531 */
172 "LEN2003", 173 "LEN2003",
173 "LEN2004", /* L440 */ 174 "LEN2004", /* L440 */
174 "LEN2005", 175 "LEN2005",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 381b20d4c561..136b7b204f56 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -402,6 +402,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
402 }, 402 },
403 }, 403 },
404 { 404 {
405 /* Acer Aspire 5710 */
406 .matches = {
407 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
408 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
409 },
410 },
411 {
405 /* Gericom Bellagio */ 412 /* Gericom Bellagio */
406 .matches = { 413 .matches = {
407 DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), 414 DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 977d05cd9e2e..e73cf2c71f35 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1217,9 +1217,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1217 * a=(pi*r^2)/C. 1217 * a=(pi*r^2)/C.
1218 */ 1218 */
1219 int a = data[5]; 1219 int a = data[5];
1220 int x_res = input_abs_get_res(input, ABS_X); 1220 int x_res = input_abs_get_res(input, ABS_MT_POSITION_X);
1221 int y_res = input_abs_get_res(input, ABS_Y); 1221 int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y);
1222 width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE); 1222 width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
1223 height = width * y_res / x_res; 1223 height = width * y_res / x_res;
1224 } 1224 }
1225 1225
@@ -1587,7 +1587,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
1587 input_abs_set_res(input_dev, ABS_X, features->x_resolution); 1587 input_abs_set_res(input_dev, ABS_X, features->x_resolution);
1588 input_abs_set_res(input_dev, ABS_Y, features->y_resolution); 1588 input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
1589 } else { 1589 } else {
1590 if (features->touch_max <= 2) { 1590 if (features->touch_max == 1) {
1591 input_set_abs_params(input_dev, ABS_X, 0, 1591 input_set_abs_params(input_dev, ABS_X, 0,
1592 features->x_max, features->x_fuzz, 0); 1592 features->x_max, features->x_fuzz, 0);
1593 input_set_abs_params(input_dev, ABS_Y, 0, 1593 input_set_abs_params(input_dev, ABS_Y, 0,
@@ -1815,14 +1815,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1815 case MTTPC: 1815 case MTTPC:
1816 case MTTPC_B: 1816 case MTTPC_B:
1817 case TABLETPC2FG: 1817 case TABLETPC2FG:
1818 if (features->device_type == BTN_TOOL_FINGER) { 1818 if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
1819 unsigned int flags = INPUT_MT_DIRECT; 1819 input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
1820
1821 if (wacom_wac->features.type == TABLETPC2FG)
1822 flags = 0;
1823
1824 input_mt_init_slots(input_dev, features->touch_max, flags);
1825 }
1826 /* fall through */ 1820 /* fall through */
1827 1821
1828 case TABLETPC: 1822 case TABLETPC:
@@ -1883,10 +1877,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1883 __set_bit(BTN_RIGHT, input_dev->keybit); 1877 __set_bit(BTN_RIGHT, input_dev->keybit);
1884 1878
1885 if (features->touch_max) { 1879 if (features->touch_max) {
1886 /* touch interface */
1887 unsigned int flags = INPUT_MT_POINTER;
1888
1889 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1890 if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { 1880 if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
1891 input_set_abs_params(input_dev, 1881 input_set_abs_params(input_dev,
1892 ABS_MT_TOUCH_MAJOR, 1882 ABS_MT_TOUCH_MAJOR,
@@ -1894,12 +1884,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1894 input_set_abs_params(input_dev, 1884 input_set_abs_params(input_dev,
1895 ABS_MT_TOUCH_MINOR, 1885 ABS_MT_TOUCH_MINOR,
1896 0, features->y_max, 0, 0); 1886 0, features->y_max, 0, 0);
1897 } else {
1898 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1899 __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
1900 flags = 0;
1901 } 1887 }
1902 input_mt_init_slots(input_dev, features->touch_max, flags); 1888 input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
1903 } else { 1889 } else {
1904 /* buttons/keys only interface */ 1890 /* buttons/keys only interface */
1905 __clear_bit(ABS_X, input_dev->absbit); 1891 __clear_bit(ABS_X, input_dev->absbit);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 4e793a17361f..2ce649520fe0 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -359,9 +359,12 @@ static int titsc_parse_dt(struct platform_device *pdev,
359 */ 359 */
360 err = of_property_read_u32(node, "ti,coordinate-readouts", 360 err = of_property_read_u32(node, "ti,coordinate-readouts",
361 &ts_dev->coordinate_readouts); 361 &ts_dev->coordinate_readouts);
362 if (err < 0) 362 if (err < 0) {
363 dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n");
363 err = of_property_read_u32(node, "ti,coordiante-readouts", 364 err = of_property_read_u32(node, "ti,coordiante-readouts",
364 &ts_dev->coordinate_readouts); 365 &ts_dev->coordinate_readouts);
366 }
367
365 if (err < 0) 368 if (err < 0)
366 return err; 369 return err;
367 370
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index b99dd88e31b9..bb446d742a2d 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -170,10 +170,10 @@ int pamu_disable_liodn(int liodn)
170static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) 170static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
171{ 171{
172 /* Bug if not a power of 2 */ 172 /* Bug if not a power of 2 */
173 BUG_ON(!is_power_of_2(addrspace_size)); 173 BUG_ON((addrspace_size & (addrspace_size - 1)));
174 174
175 /* window size is 2^(WSE+1) bytes */ 175 /* window size is 2^(WSE+1) bytes */
176 return __ffs(addrspace_size) - 1; 176 return fls64(addrspace_size) - 2;
177} 177}
178 178
179/* Derive the PAACE window count encoding for the subwindow count */ 179/* Derive the PAACE window count encoding for the subwindow count */
@@ -351,7 +351,7 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
351 struct paace *ppaace; 351 struct paace *ppaace;
352 unsigned long fspi; 352 unsigned long fspi;
353 353
354 if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) { 354 if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
355 pr_debug("window size too small or not a power of two %llx\n", win_size); 355 pr_debug("window size too small or not a power of two %llx\n", win_size);
356 return -EINVAL; 356 return -EINVAL;
357 } 357 }
@@ -464,7 +464,7 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
464 return -ENOENT; 464 return -ENOENT;
465 } 465 }
466 466
467 if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) { 467 if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) {
468 pr_debug("subwindow size out of range, or not a power of 2\n"); 468 pr_debug("subwindow size out of range, or not a power of 2\n");
469 return -EINVAL; 469 return -EINVAL;
470 } 470 }
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 93072ba44b1d..af47648301a9 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -301,7 +301,7 @@ static int check_size(u64 size, dma_addr_t iova)
301 * Size must be a power of two and at least be equal 301 * Size must be a power of two and at least be equal
302 * to PAMU page size. 302 * to PAMU page size.
303 */ 303 */
304 if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) { 304 if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
305 pr_debug("%s: size too small or not a power of two\n", __func__); 305 pr_debug("%s: size too small or not a power of two\n", __func__);
306 return -EINVAL; 306 return -EINVAL;
307 } 307 }
@@ -335,11 +335,6 @@ static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
335 return domain; 335 return domain;
336} 336}
337 337
338static inline struct device_domain_info *find_domain(struct device *dev)
339{
340 return dev->archdata.iommu_domain;
341}
342
343static void remove_device_ref(struct device_domain_info *info, u32 win_cnt) 338static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
344{ 339{
345 unsigned long flags; 340 unsigned long flags;
@@ -380,7 +375,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
380 * Check here if the device is already attached to domain or not. 375 * Check here if the device is already attached to domain or not.
381 * If the device is already attached to a domain detach it. 376 * If the device is already attached to a domain detach it.
382 */ 377 */
383 old_domain_info = find_domain(dev); 378 old_domain_info = dev->archdata.iommu_domain;
384 if (old_domain_info && old_domain_info->domain != dma_domain) { 379 if (old_domain_info && old_domain_info->domain != dma_domain) {
385 spin_unlock_irqrestore(&device_domain_lock, flags); 380 spin_unlock_irqrestore(&device_domain_lock, flags);
386 detach_device(dev, old_domain_info->domain); 381 detach_device(dev, old_domain_info->domain);
@@ -399,7 +394,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
399 * the info for the first LIODN as all 394 * the info for the first LIODN as all
400 * LIODNs share the same domain 395 * LIODNs share the same domain
401 */ 396 */
402 if (!old_domain_info) 397 if (!dev->archdata.iommu_domain)
403 dev->archdata.iommu_domain = info; 398 dev->archdata.iommu_domain = info;
404 spin_unlock_irqrestore(&device_domain_lock, flags); 399 spin_unlock_irqrestore(&device_domain_lock, flags);
405 400
@@ -1042,12 +1037,15 @@ root_bus:
1042 group = get_shared_pci_device_group(pdev); 1037 group = get_shared_pci_device_group(pdev);
1043 } 1038 }
1044 1039
1040 if (!group)
1041 group = ERR_PTR(-ENODEV);
1042
1045 return group; 1043 return group;
1046} 1044}
1047 1045
1048static int fsl_pamu_add_device(struct device *dev) 1046static int fsl_pamu_add_device(struct device *dev)
1049{ 1047{
1050 struct iommu_group *group = NULL; 1048 struct iommu_group *group = ERR_PTR(-ENODEV);
1051 struct pci_dev *pdev; 1049 struct pci_dev *pdev;
1052 const u32 *prop; 1050 const u32 *prop;
1053 int ret, len; 1051 int ret, len;
@@ -1070,7 +1068,7 @@ static int fsl_pamu_add_device(struct device *dev)
1070 group = get_device_iommu_group(dev); 1068 group = get_device_iommu_group(dev);
1071 } 1069 }
1072 1070
1073 if (!group || IS_ERR(group)) 1071 if (IS_ERR(group))
1074 return PTR_ERR(group); 1072 return PTR_ERR(group);
1075 1073
1076 ret = iommu_group_add_device(group, dev); 1074 ret = iommu_group_add_device(group, dev);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7e11c9d6ae8c..7c131cf7cc13 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -42,6 +42,7 @@
42#include <linux/irqchip/chained_irq.h> 42#include <linux/irqchip/chained_irq.h>
43#include <linux/irqchip/arm-gic.h> 43#include <linux/irqchip/arm-gic.h>
44 44
45#include <asm/cputype.h>
45#include <asm/irq.h> 46#include <asm/irq.h>
46#include <asm/exception.h> 47#include <asm/exception.h>
47#include <asm/smp_plat.h> 48#include <asm/smp_plat.h>
@@ -954,7 +955,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
954 } 955 }
955 956
956 for_each_possible_cpu(cpu) { 957 for_each_possible_cpu(cpu) {
957 unsigned long offset = percpu_offset * cpu_logical_map(cpu); 958 u32 mpidr = cpu_logical_map(cpu);
959 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
960 unsigned long offset = percpu_offset * core_id;
958 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; 961 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
959 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; 962 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
960 } 963 }
@@ -1071,8 +1074,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
1071 gic_cnt++; 1074 gic_cnt++;
1072 return 0; 1075 return 0;
1073} 1076}
1077IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
1074IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); 1078IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
1075IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); 1079IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
1080IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
1076IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); 1081IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
1077IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); 1082IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
1078 1083
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index c44950d3eb7b..b7ae0a0dd5b6 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2400,6 +2400,7 @@ allocerr:
2400error: 2400error:
2401 freeurbs(cs); 2401 freeurbs(cs);
2402 usb_set_intfdata(interface, NULL); 2402 usb_set_intfdata(interface, NULL);
2403 usb_put_dev(udev);
2403 gigaset_freecs(cs); 2404 gigaset_freecs(cs);
2404 return rc; 2405 return rc;
2405} 2406}
diff --git a/drivers/isdn/hisax/l3ni1.c b/drivers/isdn/hisax/l3ni1.c
index 0df6691d045c..8dc791bfaa6f 100644
--- a/drivers/isdn/hisax/l3ni1.c
+++ b/drivers/isdn/hisax/l3ni1.c
@@ -2059,13 +2059,17 @@ static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic)
2059 memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */ 2059 memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */
2060 l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */ 2060 l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */
2061 2061
2062 if (ic->parm.ni1_io.timeout > 0) 2062 if (ic->parm.ni1_io.timeout > 0) {
2063 if (!(pc = ni1_new_l3_process(st, -1))) 2063 pc = ni1_new_l3_process(st, -1);
2064 { free_invoke_id(st, id); 2064 if (!pc) {
2065 free_invoke_id(st, id);
2065 return (-2); 2066 return (-2);
2066 } 2067 }
2067 pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; /* remember id */ 2068 /* remember id */
2068 pc->prot.ni1.proc = ic->parm.ni1_io.proc; /* and procedure */ 2069 pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id;
2070 /* and procedure */
2071 pc->prot.ni1.proc = ic->parm.ni1_io.proc;
2072 }
2069 2073
2070 if (!(skb = l3_alloc_skb(l))) 2074 if (!(skb = l3_alloc_skb(l)))
2071 { free_invoke_id(st, id); 2075 { free_invoke_id(st, id);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 61ac63237446..62f0688d45a5 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -442,7 +442,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
442{ 442{
443 struct sock_fprog uprog; 443 struct sock_fprog uprog;
444 struct sock_filter *code = NULL; 444 struct sock_filter *code = NULL;
445 int len, err; 445 int len;
446 446
447 if (copy_from_user(&uprog, arg, sizeof(uprog))) 447 if (copy_from_user(&uprog, arg, sizeof(uprog)))
448 return -EFAULT; 448 return -EFAULT;
@@ -458,12 +458,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
458 if (IS_ERR(code)) 458 if (IS_ERR(code))
459 return PTR_ERR(code); 459 return PTR_ERR(code);
460 460
461 err = sk_chk_filter(code, uprog.len);
462 if (err) {
463 kfree(code);
464 return err;
465 }
466
467 *p = code; 461 *p = code;
468 return uprog.len; 462 return uprog.len;
469} 463}
@@ -644,9 +638,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
644 fprog.len = len; 638 fprog.len = len;
645 fprog.filter = code; 639 fprog.filter = code;
646 640
647 if (is->pass_filter) 641 if (is->pass_filter) {
648 sk_unattached_filter_destroy(is->pass_filter); 642 sk_unattached_filter_destroy(is->pass_filter);
649 err = sk_unattached_filter_create(&is->pass_filter, &fprog); 643 is->pass_filter = NULL;
644 }
645 if (fprog.filter != NULL)
646 err = sk_unattached_filter_create(&is->pass_filter,
647 &fprog);
648 else
649 err = 0;
650 kfree(code); 650 kfree(code);
651 651
652 return err; 652 return err;
@@ -663,9 +663,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
663 fprog.len = len; 663 fprog.len = len;
664 fprog.filter = code; 664 fprog.filter = code;
665 665
666 if (is->active_filter) 666 if (is->active_filter) {
667 sk_unattached_filter_destroy(is->active_filter); 667 sk_unattached_filter_destroy(is->active_filter);
668 err = sk_unattached_filter_create(&is->active_filter, &fprog); 668 is->active_filter = NULL;
669 }
670 if (fprog.filter != NULL)
671 err = sk_unattached_filter_create(&is->active_filter,
672 &fprog);
673 else
674 err = 0;
669 kfree(code); 675 kfree(code);
670 676
671 return err; 677 return err;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 4e84095833db..d724459860d9 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1541,7 +1541,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
1541 BUG_ON(block_size < 1 << SECTOR_SHIFT || 1541 BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1542 (block_size & (block_size - 1))); 1542 (block_size & (block_size - 1)));
1543 1543
1544 c = kmalloc(sizeof(*c), GFP_KERNEL); 1544 c = kzalloc(sizeof(*c), GFP_KERNEL);
1545 if (!c) { 1545 if (!c) {
1546 r = -ENOMEM; 1546 r = -ENOMEM;
1547 goto bad_client; 1547 goto bad_client;
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 4ead4ba60656..d2899e7eb3aa 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -425,6 +425,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
425 425
426 disk_super = dm_block_data(sblock); 426 disk_super = dm_block_data(sblock);
427 427
428 /* Verify the data block size hasn't changed */
429 if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
430 DMERR("changing the data block size (from %u to %llu) is not supported",
431 le32_to_cpu(disk_super->data_block_size),
432 (unsigned long long)cmd->data_block_size);
433 r = -EINVAL;
434 goto bad;
435 }
436
428 r = __check_incompat_features(disk_super, cmd); 437 r = __check_incompat_features(disk_super, cmd);
429 if (r < 0) 438 if (r < 0)
430 goto bad; 439 goto bad;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5f054c44b485..2c63326638b6 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -231,7 +231,7 @@ struct cache {
231 /* 231 /*
232 * cache_size entries, dirty if set 232 * cache_size entries, dirty if set
233 */ 233 */
234 dm_cblock_t nr_dirty; 234 atomic_t nr_dirty;
235 unsigned long *dirty_bitset; 235 unsigned long *dirty_bitset;
236 236
237 /* 237 /*
@@ -492,7 +492,7 @@ static bool is_dirty(struct cache *cache, dm_cblock_t b)
492static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) 492static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
493{ 493{
494 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { 494 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
495 cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1); 495 atomic_inc(&cache->nr_dirty);
496 policy_set_dirty(cache->policy, oblock); 496 policy_set_dirty(cache->policy, oblock);
497 } 497 }
498} 498}
@@ -501,8 +501,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
501{ 501{
502 if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { 502 if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
503 policy_clear_dirty(cache->policy, oblock); 503 policy_clear_dirty(cache->policy, oblock);
504 cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1); 504 if (atomic_dec_return(&cache->nr_dirty) == 0)
505 if (!from_cblock(cache->nr_dirty))
506 dm_table_event(cache->ti->table); 505 dm_table_event(cache->ti->table);
507 } 506 }
508} 507}
@@ -2269,7 +2268,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2269 atomic_set(&cache->quiescing_ack, 0); 2268 atomic_set(&cache->quiescing_ack, 0);
2270 2269
2271 r = -ENOMEM; 2270 r = -ENOMEM;
2272 cache->nr_dirty = 0; 2271 atomic_set(&cache->nr_dirty, 0);
2273 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); 2272 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2274 if (!cache->dirty_bitset) { 2273 if (!cache->dirty_bitset) {
2275 *error = "could not allocate dirty bitset"; 2274 *error = "could not allocate dirty bitset";
@@ -2808,7 +2807,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
2808 2807
2809 residency = policy_residency(cache->policy); 2808 residency = policy_residency(cache->policy);
2810 2809
2811 DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ", 2810 DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
2812 (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT), 2811 (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
2813 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata), 2812 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2814 (unsigned long long)nr_blocks_metadata, 2813 (unsigned long long)nr_blocks_metadata,
@@ -2821,7 +2820,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
2821 (unsigned) atomic_read(&cache->stats.write_miss), 2820 (unsigned) atomic_read(&cache->stats.write_miss),
2822 (unsigned) atomic_read(&cache->stats.demotion), 2821 (unsigned) atomic_read(&cache->stats.demotion),
2823 (unsigned) atomic_read(&cache->stats.promotion), 2822 (unsigned) atomic_read(&cache->stats.promotion),
2824 (unsigned long long) from_cblock(cache->nr_dirty)); 2823 (unsigned long) atomic_read(&cache->nr_dirty));
2825 2824
2826 if (writethrough_mode(&cache->features)) 2825 if (writethrough_mode(&cache->features))
2827 DMEMIT("1 writethrough "); 2826 DMEMIT("1 writethrough ");
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index b086a945edcb..e9d33ad59df5 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
613 613
614 disk_super = dm_block_data(sblock); 614 disk_super = dm_block_data(sblock);
615 615
616 /* Verify the data block size hasn't changed */
617 if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
618 DMERR("changing the data block size (from %u to %llu) is not supported",
619 le32_to_cpu(disk_super->data_block_size),
620 (unsigned long long)pmd->data_block_size);
621 r = -EINVAL;
622 goto bad_unlock_sblock;
623 }
624
616 r = __check_incompat_features(disk_super, pmd); 625 r = __check_incompat_features(disk_super, pmd);
617 if (r < 0) 626 if (r < 0)
618 goto bad_unlock_sblock; 627 goto bad_unlock_sblock;
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 8637d2ed7623..2e3cdcfa0a67 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -60,7 +60,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
60 jiffies_to_msecs(jiffies) - 60 jiffies_to_msecs(jiffies) -
61 (jiffies_to_msecs(timeout) - TIMEOUT)); 61 (jiffies_to_msecs(timeout) - TIMEOUT));
62 62
63 if (!(cmd->args[0] >> 7) & 0x01) { 63 if (!((cmd->args[0] >> 7) & 0x01)) {
64 ret = -ETIMEDOUT; 64 ret = -ETIMEDOUT;
65 goto err_mutex_unlock; 65 goto err_mutex_unlock;
66 } 66 }
@@ -485,20 +485,6 @@ static int si2168_init(struct dvb_frontend *fe)
485 if (ret) 485 if (ret)
486 goto err; 486 goto err;
487 487
488 cmd.args[0] = 0x05;
489 cmd.args[1] = 0x00;
490 cmd.args[2] = 0xaa;
491 cmd.args[3] = 0x4d;
492 cmd.args[4] = 0x56;
493 cmd.args[5] = 0x40;
494 cmd.args[6] = 0x00;
495 cmd.args[7] = 0x00;
496 cmd.wlen = 8;
497 cmd.rlen = 1;
498 ret = si2168_cmd_execute(s, &cmd);
499 if (ret)
500 goto err;
501
502 /* cold state - try to download firmware */ 488 /* cold state - try to download firmware */
503 dev_info(&s->client->dev, "%s: found a '%s' in cold state\n", 489 dev_info(&s->client->dev, "%s: found a '%s' in cold state\n",
504 KBUILD_MODNAME, si2168_ops.info.name); 490 KBUILD_MODNAME, si2168_ops.info.name);
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 2a343e896f40..53f7f06ae343 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -22,7 +22,7 @@
22#include <linux/firmware.h> 22#include <linux/firmware.h>
23#include <linux/i2c-mux.h> 23#include <linux/i2c-mux.h>
24 24
25#define SI2168_FIRMWARE "dvb-demod-si2168-01.fw" 25#define SI2168_FIRMWARE "dvb-demod-si2168-02.fw"
26 26
27/* state struct */ 27/* state struct */
28struct si2168 { 28struct si2168 {
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 522fe00f5eee..9619be5d4827 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -668,6 +668,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
668 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 668 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
669 int ret, i; 669 int ret, i;
670 u8 mode, rolloff, pilot, inversion, div; 670 u8 mode, rolloff, pilot, inversion, div;
671 fe_modulation_t modulation;
671 672
672 dev_dbg(&priv->i2c->dev, 673 dev_dbg(&priv->i2c->dev,
673 "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n", 674 "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -702,10 +703,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
702 703
703 switch (c->delivery_system) { 704 switch (c->delivery_system) {
704 case SYS_DVBS: 705 case SYS_DVBS:
706 modulation = QPSK;
705 rolloff = 0; 707 rolloff = 0;
706 pilot = 2; 708 pilot = 2;
707 break; 709 break;
708 case SYS_DVBS2: 710 case SYS_DVBS2:
711 modulation = c->modulation;
712
709 switch (c->rolloff) { 713 switch (c->rolloff) {
710 case ROLLOFF_20: 714 case ROLLOFF_20:
711 rolloff = 2; 715 rolloff = 2;
@@ -750,7 +754,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
750 754
751 for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) { 755 for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
752 if (c->delivery_system == TDA10071_MODCOD[i].delivery_system && 756 if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
753 c->modulation == TDA10071_MODCOD[i].modulation && 757 modulation == TDA10071_MODCOD[i].modulation &&
754 c->fec_inner == TDA10071_MODCOD[i].fec) { 758 c->fec_inner == TDA10071_MODCOD[i].fec) {
755 mode = TDA10071_MODCOD[i].val; 759 mode = TDA10071_MODCOD[i].val;
756 dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n", 760 dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
@@ -834,10 +838,10 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
834 838
835 switch ((buf[1] >> 0) & 0x01) { 839 switch ((buf[1] >> 0) & 0x01) {
836 case 0: 840 case 0:
837 c->inversion = INVERSION_OFF; 841 c->inversion = INVERSION_ON;
838 break; 842 break;
839 case 1: 843 case 1:
840 c->inversion = INVERSION_ON; 844 c->inversion = INVERSION_OFF;
841 break; 845 break;
842 } 846 }
843 847
@@ -856,7 +860,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
856 if (ret) 860 if (ret)
857 goto error; 861 goto error;
858 862
859 c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0); 863 c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000;
860 864
861 return ret; 865 return ret;
862error: 866error:
diff --git a/drivers/media/dvb-frontends/tda10071_priv.h b/drivers/media/dvb-frontends/tda10071_priv.h
index 4baf14bfb65a..420486192736 100644
--- a/drivers/media/dvb-frontends/tda10071_priv.h
+++ b/drivers/media/dvb-frontends/tda10071_priv.h
@@ -55,6 +55,7 @@ static struct tda10071_modcod {
55 { SYS_DVBS2, QPSK, FEC_8_9, 0x0a }, 55 { SYS_DVBS2, QPSK, FEC_8_9, 0x0a },
56 { SYS_DVBS2, QPSK, FEC_9_10, 0x0b }, 56 { SYS_DVBS2, QPSK, FEC_9_10, 0x0b },
57 /* 8PSK */ 57 /* 8PSK */
58 { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00 },
58 { SYS_DVBS2, PSK_8, FEC_3_5, 0x0c }, 59 { SYS_DVBS2, PSK_8, FEC_3_5, 0x0c },
59 { SYS_DVBS2, PSK_8, FEC_2_3, 0x0d }, 60 { SYS_DVBS2, PSK_8, FEC_2_3, 0x0d },
60 { SYS_DVBS2, PSK_8, FEC_3_4, 0x0e }, 61 { SYS_DVBS2, PSK_8, FEC_3_4, 0x0e },
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index e65c760e4e8b..0006d6bf8c18 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -179,7 +179,7 @@ static const struct v4l2_file_operations ts_fops =
179 .read = vb2_fop_read, 179 .read = vb2_fop_read,
180 .poll = vb2_fop_poll, 180 .poll = vb2_fop_poll,
181 .mmap = vb2_fop_mmap, 181 .mmap = vb2_fop_mmap,
182 .ioctl = video_ioctl2, 182 .unlocked_ioctl = video_ioctl2,
183}; 183};
184 184
185static const struct v4l2_ioctl_ops ts_ioctl_ops = { 185static const struct v4l2_ioctl_ops ts_ioctl_ops = {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index a7ed16497903..1e4ec697fb10 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -269,6 +269,7 @@ err:
269 list_del(&buf->list); 269 list_del(&buf->list);
270 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 270 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
271 } 271 }
272 spin_unlock_irqrestore(&common->irqlock, flags);
272 273
273 return ret; 274 return ret;
274} 275}
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 5bb085b19bcb..b431b58f39e3 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -233,6 +233,7 @@ err:
233 list_del(&buf->list); 233 list_del(&buf->list);
234 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 234 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
235 } 235 }
236 spin_unlock_irqrestore(&common->irqlock, flags);
236 237
237 return ret; 238 return ret;
238} 239}
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 271a752cee54..fa4cc7b880aa 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -57,7 +57,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
57 jiffies_to_msecs(jiffies) - 57 jiffies_to_msecs(jiffies) -
58 (jiffies_to_msecs(timeout) - TIMEOUT)); 58 (jiffies_to_msecs(timeout) - TIMEOUT));
59 59
60 if (!(buf[0] >> 7) & 0x01) { 60 if (!((buf[0] >> 7) & 0x01)) {
61 ret = -ETIMEDOUT; 61 ret = -ETIMEDOUT;
62 goto err_mutex_unlock; 62 goto err_mutex_unlock;
63 } else { 63 } else {
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 021e4d35e4d7..7b9b75f60774 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -704,15 +704,41 @@ static int af9035_read_config(struct dvb_usb_device *d)
704 if (ret < 0) 704 if (ret < 0)
705 goto err; 705 goto err;
706 706
707 if (tmp == 0x00) 707 dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
708 dev_dbg(&d->udev->dev, 708 __func__, i, tmp);
709 "%s: [%d]tuner not set, using default\n", 709
710 __func__, i); 710 /* tuner sanity check */
711 else 711 if (state->chip_type == 0x9135) {
712 if (state->chip_version == 0x02) {
713 /* IT9135 BX (v2) */
714 switch (tmp) {
715 case AF9033_TUNER_IT9135_60:
716 case AF9033_TUNER_IT9135_61:
717 case AF9033_TUNER_IT9135_62:
718 state->af9033_config[i].tuner = tmp;
719 break;
720 }
721 } else {
722 /* IT9135 AX (v1) */
723 switch (tmp) {
724 case AF9033_TUNER_IT9135_38:
725 case AF9033_TUNER_IT9135_51:
726 case AF9033_TUNER_IT9135_52:
727 state->af9033_config[i].tuner = tmp;
728 break;
729 }
730 }
731 } else {
732 /* AF9035 */
712 state->af9033_config[i].tuner = tmp; 733 state->af9033_config[i].tuner = tmp;
734 }
713 735
714 dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n", 736 if (state->af9033_config[i].tuner != tmp) {
715 __func__, i, state->af9033_config[i].tuner); 737 dev_info(&d->udev->dev,
738 "%s: [%d] overriding tuner from %02x to %02x\n",
739 KBUILD_MODNAME, i, tmp,
740 state->af9033_config[i].tuner);
741 }
716 742
717 switch (state->af9033_config[i].tuner) { 743 switch (state->af9033_config[i].tuner) {
718 case AF9033_TUNER_TUA9001: 744 case AF9033_TUNER_TUA9001:
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 2fd1c5e31a0f..339adce7c7a5 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
928 {USB_DEVICE(0x093a, 0x2620)}, 928 {USB_DEVICE(0x093a, 0x2620)},
929 {USB_DEVICE(0x093a, 0x2621)}, 929 {USB_DEVICE(0x093a, 0x2621)},
930 {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP}, 930 {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
931 {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
931 {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP}, 932 {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
932 {USB_DEVICE(0x093a, 0x2625)}, 933 {USB_DEVICE(0x093a, 0x2625)},
933 {USB_DEVICE(0x093a, 0x2626)}, 934 {USB_DEVICE(0x093a, 0x2626)},
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 0500c4175d5f..6bce01a674f9 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
82} 82}
83 83
84/*=========================================================================*/ 84/*=========================================================================*/
85/* bufffer bits */ 85/* buffer bits */
86 86
87/* function expects dev->io_mutex to be hold by caller */ 87/* function expects dev->io_mutex to be hold by caller */
88int hdpvr_cancel_queue(struct hdpvr_device *dev) 88int hdpvr_cancel_queue(struct hdpvr_device *dev)
@@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
926 case V4L2_CID_MPEG_AUDIO_ENCODING: 926 case V4L2_CID_MPEG_AUDIO_ENCODING:
927 if (dev->flags & HDPVR_FLAG_AC3_CAP) { 927 if (dev->flags & HDPVR_FLAG_AC3_CAP) {
928 opt->audio_codec = ctrl->val; 928 opt->audio_codec = ctrl->val;
929 return hdpvr_set_audio(dev, opt->audio_input, 929 return hdpvr_set_audio(dev, opt->audio_input + 1,
930 opt->audio_codec); 930 opt->audio_codec);
931 } 931 }
932 return 0; 932 return 0;
@@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
1198 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, 1198 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
1199 V4L2_CID_MPEG_AUDIO_ENCODING, 1199 V4L2_CID_MPEG_AUDIO_ENCODING,
1200 ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC, 1200 ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
1201 0x7, V4L2_MPEG_AUDIO_ENCODING_AAC); 1201 0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
1202 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, 1202 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
1203 V4L2_CID_MPEG_VIDEO_ENCODING, 1203 V4L2_CID_MPEG_VIDEO_ENCODING,
1204 V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3, 1204 V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 4ae54caadd03..ce1c9f5d9dee 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -610,10 +610,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
610 aspect.denominator = 9; 610 aspect.denominator = 9;
611 } else if (ratio == 34) { 611 } else if (ratio == 34) {
612 aspect.numerator = 4; 612 aspect.numerator = 4;
613 aspect.numerator = 3; 613 aspect.denominator = 3;
614 } else if (ratio == 68) { 614 } else if (ratio == 68) {
615 aspect.numerator = 15; 615 aspect.numerator = 15;
616 aspect.numerator = 9; 616 aspect.denominator = 9;
617 } else { 617 } else {
618 aspect.numerator = hor_landscape + 99; 618 aspect.numerator = hor_landscape + 99;
619 aspect.denominator = 100; 619 aspect.denominator = 100;
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index e4ec355704a6..a7543ba3e190 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -52,6 +52,11 @@
52/* Atmel chips */ 52/* Atmel chips */
53#define AT49BV640D 0x02de 53#define AT49BV640D 0x02de
54#define AT49BV640DT 0x02db 54#define AT49BV640DT 0x02db
55/* Sharp chips */
56#define LH28F640BFHE_PTTL90 0x00b0
57#define LH28F640BFHE_PBTL90 0x00b1
58#define LH28F640BFHE_PTTL70A 0x00b2
59#define LH28F640BFHE_PBTL70A 0x00b3
55 60
56static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 61static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
57static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 62static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -258,6 +263,36 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd)
258 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e; 263 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
259}; 264};
260 265
266static int is_LH28F640BF(struct cfi_private *cfi)
267{
268 /* Sharp LH28F640BF Family */
269 if (cfi->mfr == CFI_MFR_SHARP && (
270 cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
271 cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
272 return 1;
273 return 0;
274}
275
276static void fixup_LH28F640BF(struct mtd_info *mtd)
277{
278 struct map_info *map = mtd->priv;
279 struct cfi_private *cfi = map->fldrv_priv;
280 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
281
282 /* Reset the Partition Configuration Register on LH28F640BF
283 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
284 if (is_LH28F640BF(cfi)) {
285 printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
286 map_write(map, CMD(0x60), 0);
287 map_write(map, CMD(0x04), 0);
288
289 /* We have set one single partition thus
290 * Simultaneous Operations are not allowed */
291 printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
292 extp->FeatureSupport &= ~512;
293 }
294}
295
261static void fixup_use_point(struct mtd_info *mtd) 296static void fixup_use_point(struct mtd_info *mtd)
262{ 297{
263 struct map_info *map = mtd->priv; 298 struct map_info *map = mtd->priv;
@@ -309,6 +344,8 @@ static struct cfi_fixup cfi_fixup_table[] = {
309 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct }, 344 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
310 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb }, 345 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
311 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock }, 346 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
347 { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
348 { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
312 { 0, 0, NULL } 349 { 0, 0, NULL }
313}; 350};
314 351
@@ -1649,6 +1686,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1649 initial_adr = adr; 1686 initial_adr = adr;
1650 cmd_adr = adr & ~(wbufsize-1); 1687 cmd_adr = adr & ~(wbufsize-1);
1651 1688
1689 /* Sharp LH28F640BF chips need the first address for the
1690 * Page Buffer Program command. See Table 5 of
1691 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1692 if (is_LH28F640BF(cfi))
1693 cmd_adr = adr;
1694
1652 /* Let's determine this according to the interleave only once */ 1695 /* Let's determine this according to the interleave only once */
1653 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9); 1696 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1654 1697
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
index 7df86948e6d4..b4f61c7fc161 100644
--- a/drivers/mtd/devices/elm.c
+++ b/drivers/mtd/devices/elm.c
@@ -475,6 +475,7 @@ static int elm_context_save(struct elm_info *info)
475 ELM_SYNDROME_FRAGMENT_1 + offset); 475 ELM_SYNDROME_FRAGMENT_1 + offset);
476 regs->elm_syndrome_fragment_0[i] = elm_read_reg(info, 476 regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
477 ELM_SYNDROME_FRAGMENT_0 + offset); 477 ELM_SYNDROME_FRAGMENT_0 + offset);
478 break;
478 default: 479 default:
479 return -EINVAL; 480 return -EINVAL;
480 } 481 }
@@ -520,6 +521,7 @@ static int elm_context_restore(struct elm_info *info)
520 regs->elm_syndrome_fragment_1[i]); 521 regs->elm_syndrome_fragment_1[i]);
521 elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset, 522 elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
522 regs->elm_syndrome_fragment_0[i]); 523 regs->elm_syndrome_fragment_0[i]);
524 break;
523 default: 525 default:
524 return -EINVAL; 526 return -EINVAL;
525 } 527 }
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 41167e9e991e..4f3e80c68a26 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4047,8 +4047,10 @@ int nand_scan_tail(struct mtd_info *mtd)
4047 ecc->layout->oobavail += ecc->layout->oobfree[i].length; 4047 ecc->layout->oobavail += ecc->layout->oobfree[i].length;
4048 mtd->oobavail = ecc->layout->oobavail; 4048 mtd->oobavail = ecc->layout->oobavail;
4049 4049
4050 /* ECC sanity check: warn noisily if it's too weak */ 4050 /* ECC sanity check: warn if it's too weak */
4051 WARN_ON(!nand_ecc_strength_good(mtd)); 4051 if (!nand_ecc_strength_good(mtd))
4052 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
4053 mtd->name);
4052 4054
4053 /* 4055 /*
4054 * Set the number of read / write steps for one page depending on ECC 4056 * Set the number of read / write steps for one page depending on ECC
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index b04e7d059888..0431b46d9fd9 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -125,7 +125,7 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
125 parent = *p; 125 parent = *p;
126 av = rb_entry(parent, struct ubi_ainf_volume, rb); 126 av = rb_entry(parent, struct ubi_ainf_volume, rb);
127 127
128 if (vol_id < av->vol_id) 128 if (vol_id > av->vol_id)
129 p = &(*p)->rb_left; 129 p = &(*p)->rb_left;
130 else 130 else
131 p = &(*p)->rb_right; 131 p = &(*p)->rb_right;
@@ -423,7 +423,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
423 pnum, err); 423 pnum, err);
424 ret = err > 0 ? UBI_BAD_FASTMAP : err; 424 ret = err > 0 ? UBI_BAD_FASTMAP : err;
425 goto out; 425 goto out;
426 } else if (ret == UBI_IO_BITFLIPS) 426 } else if (err == UBI_IO_BITFLIPS)
427 scrub = 1; 427 scrub = 1;
428 428
429 /* 429 /*
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3a451b6cd3d5..701f86cd5993 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4068,7 +4068,7 @@ static int bond_check_params(struct bond_params *params)
4068 } 4068 }
4069 4069
4070 if (ad_select) { 4070 if (ad_select) {
4071 bond_opt_initstr(&newval, lacp_rate); 4071 bond_opt_initstr(&newval, ad_select);
4072 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT), 4072 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
4073 &newval); 4073 &newval);
4074 if (!valptr) { 4074 if (!valptr) {
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 824108cd9fd5..12430be6448a 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -287,7 +287,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
287 break; 287 break;
288 } 288 }
289 289
290 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res); 290 priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
291 resource_size(res));
291 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0) 292 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
292 dev_info(&pdev->dev, "control memory is not used for raminit\n"); 293 dev_info(&pdev->dev, "control memory is not used for raminit\n");
293 else 294 else
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index c83584a26713..5a1891faba8a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -339,7 +339,8 @@ static int xgbe_probe(struct platform_device *pdev)
339 /* Calculate the number of Tx and Rx rings to be created */ 339 /* Calculate the number of Tx and Rx rings to be created */
340 pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(), 340 pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
341 pdata->hw_feat.tx_ch_cnt); 341 pdata->hw_feat.tx_ch_cnt);
342 if (netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count)) { 342 ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
343 if (ret) {
343 dev_err(dev, "error setting real tx queue count\n"); 344 dev_err(dev, "error setting real tx queue count\n");
344 goto err_io; 345 goto err_io;
345 } 346 }
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 141160ef249a..5776e503e4c5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -654,13 +654,13 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
654 654
655 work_done = bcm_sysport_tx_reclaim(ring->priv, ring); 655 work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
656 656
657 if (work_done < budget) { 657 if (work_done == 0) {
658 napi_complete(napi); 658 napi_complete(napi);
659 /* re-enable TX interrupt */ 659 /* re-enable TX interrupt */
660 intrl2_1_mask_clear(ring->priv, BIT(ring->index)); 660 intrl2_1_mask_clear(ring->priv, BIT(ring->index));
661 } 661 }
662 662
663 return work_done; 663 return 0;
664} 664}
665 665
666static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv) 666static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
@@ -1254,28 +1254,17 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
1254 usleep_range(1000, 2000); 1254 usleep_range(1000, 2000);
1255} 1255}
1256 1256
1257static inline int umac_reset(struct bcm_sysport_priv *priv) 1257static inline void umac_reset(struct bcm_sysport_priv *priv)
1258{ 1258{
1259 unsigned int timeout = 0;
1260 u32 reg; 1259 u32 reg;
1261 int ret = 0;
1262
1263 umac_writel(priv, 0, UMAC_CMD);
1264 while (timeout++ < 1000) {
1265 reg = umac_readl(priv, UMAC_CMD);
1266 if (!(reg & CMD_SW_RESET))
1267 break;
1268
1269 udelay(1);
1270 }
1271
1272 if (timeout == 1000) {
1273 dev_err(&priv->pdev->dev,
1274 "timeout waiting for MAC to come out of reset\n");
1275 ret = -ETIMEDOUT;
1276 }
1277 1260
1278 return ret; 1261 reg = umac_readl(priv, UMAC_CMD);
1262 reg |= CMD_SW_RESET;
1263 umac_writel(priv, reg, UMAC_CMD);
1264 udelay(10);
1265 reg = umac_readl(priv, UMAC_CMD);
1266 reg &= ~CMD_SW_RESET;
1267 umac_writel(priv, reg, UMAC_CMD);
1279} 1268}
1280 1269
1281static void umac_set_hw_addr(struct bcm_sysport_priv *priv, 1270static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
@@ -1303,11 +1292,7 @@ static int bcm_sysport_open(struct net_device *dev)
1303 int ret; 1292 int ret;
1304 1293
1305 /* Reset UniMAC */ 1294 /* Reset UniMAC */
1306 ret = umac_reset(priv); 1295 umac_reset(priv);
1307 if (ret) {
1308 netdev_err(dev, "UniMAC reset failed\n");
1309 return ret;
1310 }
1311 1296
1312 /* Flush TX and RX FIFOs at TOPCTRL level */ 1297 /* Flush TX and RX FIFOs at TOPCTRL level */
1313 topctrl_flush(priv); 1298 topctrl_flush(priv);
@@ -1589,12 +1574,6 @@ static int bcm_sysport_probe(struct platform_device *pdev)
1589 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); 1574 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
1590 dev->needed_headroom += sizeof(struct bcm_tsb); 1575 dev->needed_headroom += sizeof(struct bcm_tsb);
1591 1576
1592 /* We are interfaced to a switch which handles the multicast
1593 * filtering for us, so we do not support programming any
1594 * multicast hash table in this Ethernet MAC.
1595 */
1596 dev->flags &= ~IFF_MULTICAST;
1597
1598 /* libphy will adjust the link state accordingly */ 1577 /* libphy will adjust the link state accordingly */
1599 netif_carrier_off(dev); 1578 netif_carrier_off(dev);
1600 1579
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4cab09d3f807..8206a293e6b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -346,6 +346,7 @@ struct sw_tx_bd {
346 u8 flags; 346 u8 flags;
347/* Set on the first BD descriptor when there is a split BD */ 347/* Set on the first BD descriptor when there is a split BD */
348#define BNX2X_TSO_SPLIT_BD (1<<0) 348#define BNX2X_TSO_SPLIT_BD (1<<0)
349#define BNX2X_HAS_SECOND_PBD (1<<1)
349}; 350};
350 351
351struct sw_rx_page { 352struct sw_rx_page {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 47c5814114e1..c43e7238de21 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -227,6 +227,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
227 --nbd; 227 --nbd;
228 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); 228 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
229 229
230 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
231 /* Skip second parse bd... */
232 --nbd;
233 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
234 }
235
230 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */ 236 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
231 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { 237 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
232 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; 238 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
@@ -797,7 +803,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
797 803
798 return; 804 return;
799 } 805 }
800 bnx2x_frag_free(fp, new_data); 806 if (new_data)
807 bnx2x_frag_free(fp, new_data);
801drop: 808drop:
802 /* drop the packet and keep the buffer in the bin */ 809 /* drop the packet and keep the buffer in the bin */
803 DP(NETIF_MSG_RX_STATUS, 810 DP(NETIF_MSG_RX_STATUS,
@@ -3888,6 +3895,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3888 /* set encapsulation flag in start BD */ 3895 /* set encapsulation flag in start BD */
3889 SET_FLAG(tx_start_bd->general_data, 3896 SET_FLAG(tx_start_bd->general_data,
3890 ETH_TX_START_BD_TUNNEL_EXIST, 1); 3897 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3898
3899 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3900
3891 nbd++; 3901 nbd++;
3892 } else if (xmit_type & XMIT_CSUM) { 3902 } else if (xmit_type & XMIT_CSUM) {
3893 /* Set PBD in checksum offload case w/o encapsulation */ 3903 /* Set PBD in checksum offload case w/o encapsulation */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index bd0600cf7266..25eddd90f482 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -379,6 +379,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
379 break; 379 break;
380 case PORT_FIBRE: 380 case PORT_FIBRE:
381 case PORT_DA: 381 case PORT_DA:
382 case PORT_NONE:
382 if (!(bp->port.supported[0] & SUPPORTED_FIBRE || 383 if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
383 bp->port.supported[1] & SUPPORTED_FIBRE)) { 384 bp->port.supported[1] & SUPPORTED_FIBRE)) {
384 DP(BNX2X_MSG_ETHTOOL, 385 DP(BNX2X_MSG_ETHTOOL,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2887034523e0..6a8b1453a1b9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12937,7 +12937,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
12937 * without the default SB. 12937 * without the default SB.
12938 * For VFs there is no default SB, then we return (index+1). 12938 * For VFs there is no default SB, then we return (index+1).
12939 */ 12939 */
12940 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control); 12940 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
12941 12941
12942 index = control & PCI_MSIX_FLAGS_QSIZE; 12942 index = control & PCI_MSIX_FLAGS_QSIZE;
12943 12943
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 5ba1cfbd60da..4e615debe472 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1149,6 +1149,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1149 goto out; 1149 goto out;
1150 } 1150 }
1151 1151
1152 if (skb_padto(skb, ETH_ZLEN)) {
1153 ret = NETDEV_TX_OK;
1154 goto out;
1155 }
1156
1152 /* set the SKB transmit checksum */ 1157 /* set the SKB transmit checksum */
1153 if (priv->desc_64b_en) { 1158 if (priv->desc_64b_en) {
1154 ret = bcmgenet_put_tx_csum(dev, skb); 1159 ret = bcmgenet_put_tx_csum(dev, skb);
@@ -1408,13 +1413,6 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
1408 if (cb->skb) 1413 if (cb->skb)
1409 continue; 1414 continue;
1410 1415
1411 /* set the DMA descriptor length once and for all
1412 * it will only change if we support dynamically sizing
1413 * priv->rx_buf_len, but we do not
1414 */
1415 dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
1416 priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
1417
1418 ret = bcmgenet_rx_refill(priv, cb); 1416 ret = bcmgenet_rx_refill(priv, cb);
1419 if (ret) 1417 if (ret)
1420 break; 1418 break;
@@ -2535,14 +2533,17 @@ static int bcmgenet_probe(struct platform_device *pdev)
2535 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); 2533 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
2536 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); 2534 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
2537 2535
2538 err = register_netdev(dev); 2536 /* libphy will determine the link state */
2539 if (err) 2537 netif_carrier_off(dev);
2540 goto err_clk_disable;
2541 2538
2542 /* Turn off the main clock, WOL clock is handled separately */ 2539 /* Turn off the main clock, WOL clock is handled separately */
2543 if (!IS_ERR(priv->clk)) 2540 if (!IS_ERR(priv->clk))
2544 clk_disable_unprepare(priv->clk); 2541 clk_disable_unprepare(priv->clk);
2545 2542
2543 err = register_netdev(dev);
2544 if (err)
2545 goto err;
2546
2546 return err; 2547 return err;
2547 2548
2548err_clk_disable: 2549err_clk_disable:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 0f117105fed1..e23c993b1362 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -331,9 +331,9 @@ struct bcmgenet_mib_counters {
331#define EXT_ENERGY_DET_MASK (1 << 12) 331#define EXT_ENERGY_DET_MASK (1 << 12)
332 332
333#define EXT_RGMII_OOB_CTRL 0x0C 333#define EXT_RGMII_OOB_CTRL 0x0C
334#define RGMII_MODE_EN (1 << 0)
335#define RGMII_LINK (1 << 4) 334#define RGMII_LINK (1 << 4)
336#define OOB_DISABLE (1 << 5) 335#define OOB_DISABLE (1 << 5)
336#define RGMII_MODE_EN (1 << 6)
337#define ID_MODE_DIS (1 << 16) 337#define ID_MODE_DIS (1 << 16)
338 338
339#define EXT_GPHY_CTRL 0x1C 339#define EXT_GPHY_CTRL 0x1C
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 34a26e42f19d..1e187fb760f8 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2902,7 +2902,7 @@ static int be_open(struct net_device *netdev)
2902 for_all_evt_queues(adapter, eqo, i) { 2902 for_all_evt_queues(adapter, eqo, i) {
2903 napi_enable(&eqo->napi); 2903 napi_enable(&eqo->napi);
2904 be_enable_busy_poll(eqo); 2904 be_enable_busy_poll(eqo);
2905 be_eq_notify(adapter, eqo->q.id, true, false, 0); 2905 be_eq_notify(adapter, eqo->q.id, true, true, 0);
2906 } 2906 }
2907 adapter->flags |= BE_FLAGS_NAPI_ENABLED; 2907 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2908 2908
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index fab39e295441..36fc429298e3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -2990,11 +2990,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2990 if (ug_info->rxExtendedFiltering) { 2990 if (ug_info->rxExtendedFiltering) {
2991 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; 2991 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
2992 if (ug_info->largestexternallookupkeysize == 2992 if (ug_info->largestexternallookupkeysize ==
2993 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 2993 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
2994 size += 2994 size +=
2995 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; 2995 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
2996 if (ug_info->largestexternallookupkeysize == 2996 if (ug_info->largestexternallookupkeysize ==
2997 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) 2997 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
2998 size += 2998 size +=
2999 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; 2999 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
3000 } 3000 }
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a2db388cc31e..ee74f9536b31 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1481,6 +1481,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
1481 s32 ret_val; 1481 s32 ret_val;
1482 u16 i, rar_count = mac->rar_entry_count; 1482 u16 i, rar_count = mac->rar_entry_count;
1483 1483
1484 if ((hw->mac.type >= e1000_i210) &&
1485 !(igb_get_flash_presence_i210(hw))) {
1486 ret_val = igb_pll_workaround_i210(hw);
1487 if (ret_val)
1488 return ret_val;
1489 }
1490
1484 /* Initialize identification LED */ 1491 /* Initialize identification LED */
1485 ret_val = igb_id_led_init(hw); 1492 ret_val = igb_id_led_init(hw);
1486 if (ret_val) { 1493 if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 2a8bb35c2df2..217f8138851b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -46,14 +46,15 @@
46#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ 46#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
47 47
48/* Physical Func Reset Done Indication */ 48/* Physical Func Reset Done Indication */
49#define E1000_CTRL_EXT_PFRSTD 0x00004000 49#define E1000_CTRL_EXT_PFRSTD 0x00004000
50#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 50#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */
51#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 51#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
52#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 52#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
53#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 53#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
54#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 54#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
55#define E1000_CTRL_EXT_EIAME 0x01000000 55#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
56#define E1000_CTRL_EXT_IRCA 0x00000001 56#define E1000_CTRL_EXT_EIAME 0x01000000
57#define E1000_CTRL_EXT_IRCA 0x00000001
57/* Interrupt delay cancellation */ 58/* Interrupt delay cancellation */
58/* Driver loaded bit for FW */ 59/* Driver loaded bit for FW */
59#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 60#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
@@ -62,6 +63,7 @@
62/* packet buffer parity error detection enabled */ 63/* packet buffer parity error detection enabled */
63/* descriptor FIFO parity error detection enable */ 64/* descriptor FIFO parity error detection enable */
64#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ 65#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
66#define E1000_CTRL_EXT_PHYPDEN 0x00100000
65#define E1000_I2CCMD_REG_ADDR_SHIFT 16 67#define E1000_I2CCMD_REG_ADDR_SHIFT 16
66#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 68#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
67#define E1000_I2CCMD_OPCODE_READ 0x08000000 69#define E1000_I2CCMD_OPCODE_READ 0x08000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 89925e405849..ce55ea5d750c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -567,4 +567,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
567/* These functions must be implemented by drivers */ 567/* These functions must be implemented by drivers */
568s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); 568s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
569s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); 569s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
570
571void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
572void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
570#endif /* _E1000_HW_H_ */ 573#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 337161f440dd..65d931669f81 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -834,3 +834,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
834 } 834 }
835 return ret_val; 835 return ret_val;
836} 836}
837
838/**
839 * igb_pll_workaround_i210
840 * @hw: pointer to the HW structure
841 *
842 * Works around an errata in the PLL circuit where it occasionally
843 * provides the wrong clock frequency after power up.
844 **/
845s32 igb_pll_workaround_i210(struct e1000_hw *hw)
846{
847 s32 ret_val;
848 u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
849 u16 nvm_word, phy_word, pci_word, tmp_nvm;
850 int i;
851
852 /* Get and set needed register values */
853 wuc = rd32(E1000_WUC);
854 mdicnfg = rd32(E1000_MDICNFG);
855 reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
856 wr32(E1000_MDICNFG, reg_val);
857
858 /* Get data from NVM, or set default */
859 ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
860 &nvm_word);
861 if (ret_val)
862 nvm_word = E1000_INVM_DEFAULT_AL;
863 tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
864 for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
865 /* check current state directly from internal PHY */
866 igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
867 E1000_PHY_PLL_FREQ_REG), &phy_word);
868 if ((phy_word & E1000_PHY_PLL_UNCONF)
869 != E1000_PHY_PLL_UNCONF) {
870 ret_val = 0;
871 break;
872 } else {
873 ret_val = -E1000_ERR_PHY;
874 }
875 /* directly reset the internal PHY */
876 ctrl = rd32(E1000_CTRL);
877 wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
878
879 ctrl_ext = rd32(E1000_CTRL_EXT);
880 ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
881 wr32(E1000_CTRL_EXT, ctrl_ext);
882
883 wr32(E1000_WUC, 0);
884 reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
885 wr32(E1000_EEARBC_I210, reg_val);
886
887 igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
888 pci_word |= E1000_PCI_PMCSR_D3;
889 igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
890 usleep_range(1000, 2000);
891 pci_word &= ~E1000_PCI_PMCSR_D3;
892 igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
893 reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
894 wr32(E1000_EEARBC_I210, reg_val);
895
896 /* restore WUC register */
897 wr32(E1000_WUC, wuc);
898 }
899 /* restore MDICNFG setting */
900 wr32(E1000_MDICNFG, mdicnfg);
901 return ret_val;
902}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 9f34976687ba..3442b6357d01 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -33,6 +33,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
33s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); 33s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
34s32 igb_init_nvm_params_i210(struct e1000_hw *hw); 34s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
35bool igb_get_flash_presence_i210(struct e1000_hw *hw); 35bool igb_get_flash_presence_i210(struct e1000_hw *hw);
36s32 igb_pll_workaround_i210(struct e1000_hw *hw);
36 37
37#define E1000_STM_OPCODE 0xDB00 38#define E1000_STM_OPCODE 0xDB00
38#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 39#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -78,4 +79,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
78#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 79#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
79#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C 80#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
80 81
82/* PLL Defines */
83#define E1000_PCI_PMCSR 0x44
84#define E1000_PCI_PMCSR_D3 0x03
85#define E1000_MAX_PLL_TRIES 5
86#define E1000_PHY_PLL_UNCONF 0xFF
87#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
88#define E1000_PHY_PLL_FREQ_REG 0x000E
89#define E1000_INVM_DEFAULT_AL 0x202F
90#define E1000_INVM_AUTOLOAD 0x0A
91#define E1000_INVM_PLL_WO_VAL 0x0010
92
81#endif 93#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 1cc4b1a7e597..f5ba4e4eafb9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -66,6 +66,7 @@
66#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ 66#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
67#define E1000_PBS 0x01008 /* Packet Buffer Size */ 67#define E1000_PBS 0x01008 /* Packet Buffer Size */
68#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ 68#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
69#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
69#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ 70#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
70#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ 71#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
71#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ 72#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f145adbb55ac..a9537ba7a5a0 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7215,6 +7215,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7215 } 7215 }
7216} 7216}
7217 7217
7218void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
7219{
7220 struct igb_adapter *adapter = hw->back;
7221
7222 pci_read_config_word(adapter->pdev, reg, value);
7223}
7224
7225void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
7226{
7227 struct igb_adapter *adapter = hw->back;
7228
7229 pci_write_config_word(adapter->pdev, reg, *value);
7230}
7231
7218s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) 7232s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
7219{ 7233{
7220 struct igb_adapter *adapter = hw->back; 7234 struct igb_adapter *adapter = hw->back;
@@ -7578,6 +7592,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
7578 7592
7579 if (netif_running(netdev)) 7593 if (netif_running(netdev))
7580 igb_close(netdev); 7594 igb_close(netdev);
7595 else
7596 igb_reset(adapter);
7581 7597
7582 igb_clear_interrupt_scheme(adapter); 7598 igb_clear_interrupt_scheme(adapter);
7583 7599
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 45beca17fa50..dadd9a5f6323 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1207,7 +1207,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1207 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; 1207 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1208 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; 1208 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1209 1209
1210 if (l3_proto == swab16(ETH_P_IP)) 1210 if (l3_proto == htons(ETH_P_IP))
1211 command |= MVNETA_TXD_IP_CSUM; 1211 command |= MVNETA_TXD_IP_CSUM;
1212 else 1212 else
1213 command |= MVNETA_TX_L3_IP6; 1213 command |= MVNETA_TX_L3_IP6;
@@ -2529,7 +2529,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
2529 2529
2530 if (phydev->speed == SPEED_1000) 2530 if (phydev->speed == SPEED_1000)
2531 val |= MVNETA_GMAC_CONFIG_GMII_SPEED; 2531 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
2532 else 2532 else if (phydev->speed == SPEED_100)
2533 val |= MVNETA_GMAC_CONFIG_MII_SPEED; 2533 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2534 2534
2535 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 2535 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 80f725228f5b..56022d647837 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -294,8 +294,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
294 init_completion(&cq->free); 294 init_completion(&cq->free);
295 295
296 cq->irq = priv->eq_table.eq[cq->vector].irq; 296 cq->irq = priv->eq_table.eq[cq->vector].irq;
297 cq->irq_affinity_change = false;
298
299 return 0; 297 return 0;
300 298
301err_radix: 299err_radix:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 4b2130760eed..82322b1c8411 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -128,11 +128,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
128 mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n", 128 mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
129 name); 129 name);
130 } 130 }
131
131 } 132 }
132 } else { 133 } else {
133 cq->vector = (cq->ring + 1 + priv->port) % 134 cq->vector = (cq->ring + 1 + priv->port) %
134 mdev->dev->caps.num_comp_vectors; 135 mdev->dev->caps.num_comp_vectors;
135 } 136 }
137
138 cq->irq_desc =
139 irq_to_desc(mlx4_eq_get_irq(mdev->dev,
140 cq->vector));
136 } else { 141 } else {
137 /* For TX we use the same irq per 142 /* For TX we use the same irq per
138 ring we assigned for the RX */ 143 ring we assigned for the RX */
@@ -187,8 +192,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
187 mlx4_en_unmap_buffer(&cq->wqres.buf); 192 mlx4_en_unmap_buffer(&cq->wqres.buf);
188 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); 193 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
189 if (priv->mdev->dev->caps.comp_pool && cq->vector) { 194 if (priv->mdev->dev->caps.comp_pool && cq->vector) {
190 if (!cq->is_tx)
191 irq_set_affinity_hint(cq->mcq.irq, NULL);
192 mlx4_release_eq(priv->mdev->dev, cq->vector); 195 mlx4_release_eq(priv->mdev->dev, cq->vector);
193 } 196 }
194 cq->vector = 0; 197 cq->vector = 0;
@@ -204,6 +207,7 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
204 if (!cq->is_tx) { 207 if (!cq->is_tx) {
205 napi_hash_del(&cq->napi); 208 napi_hash_del(&cq->napi);
206 synchronize_rcu(); 209 synchronize_rcu();
210 irq_set_affinity_hint(cq->mcq.irq, NULL);
207 } 211 }
208 netif_napi_del(&cq->napi); 212 netif_napi_del(&cq->napi);
209 213
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index fa1a069e14e6..68d763d2d030 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -417,6 +417,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
417 417
418 coal->tx_coalesce_usecs = priv->tx_usecs; 418 coal->tx_coalesce_usecs = priv->tx_usecs;
419 coal->tx_max_coalesced_frames = priv->tx_frames; 419 coal->tx_max_coalesced_frames = priv->tx_frames;
420 coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
421
420 coal->rx_coalesce_usecs = priv->rx_usecs; 422 coal->rx_coalesce_usecs = priv->rx_usecs;
421 coal->rx_max_coalesced_frames = priv->rx_frames; 423 coal->rx_max_coalesced_frames = priv->rx_frames;
422 424
@@ -426,6 +428,7 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
426 coal->rx_coalesce_usecs_high = priv->rx_usecs_high; 428 coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
427 coal->rate_sample_interval = priv->sample_interval; 429 coal->rate_sample_interval = priv->sample_interval;
428 coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal; 430 coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
431
429 return 0; 432 return 0;
430} 433}
431 434
@@ -434,6 +437,9 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
434{ 437{
435 struct mlx4_en_priv *priv = netdev_priv(dev); 438 struct mlx4_en_priv *priv = netdev_priv(dev);
436 439
440 if (!coal->tx_max_coalesced_frames_irq)
441 return -EINVAL;
442
437 priv->rx_frames = (coal->rx_max_coalesced_frames == 443 priv->rx_frames = (coal->rx_max_coalesced_frames ==
438 MLX4_EN_AUTO_CONF) ? 444 MLX4_EN_AUTO_CONF) ?
439 MLX4_EN_RX_COAL_TARGET : 445 MLX4_EN_RX_COAL_TARGET :
@@ -457,6 +463,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
457 priv->rx_usecs_high = coal->rx_coalesce_usecs_high; 463 priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
458 priv->sample_interval = coal->rate_sample_interval; 464 priv->sample_interval = coal->rate_sample_interval;
459 priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; 465 priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
466 priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
460 467
461 return mlx4_en_moderation_update(priv); 468 return mlx4_en_moderation_update(priv);
462} 469}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7d4fb7bf2593..7345c43b019e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2336,7 +2336,7 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev,
2336 struct mlx4_en_priv *priv = netdev_priv(dev); 2336 struct mlx4_en_priv *priv = netdev_priv(dev);
2337 __be16 current_port; 2337 __be16 current_port;
2338 2338
2339 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)) 2339 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2340 return; 2340 return;
2341 2341
2342 if (sa_family == AF_INET6) 2342 if (sa_family == AF_INET6)
@@ -2473,6 +2473,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2473 MLX4_WQE_CTRL_SOLICITED); 2473 MLX4_WQE_CTRL_SOLICITED);
2474 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; 2474 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
2475 priv->tx_ring_num = prof->tx_ring_num; 2475 priv->tx_ring_num = prof->tx_ring_num;
2476 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
2476 2477
2477 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, 2478 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
2478 GFP_KERNEL); 2479 GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index d2d415732d99..5535862f27cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -40,6 +40,7 @@
40#include <linux/if_ether.h> 40#include <linux/if_ether.h>
41#include <linux/if_vlan.h> 41#include <linux/if_vlan.h>
42#include <linux/vmalloc.h> 42#include <linux/vmalloc.h>
43#include <linux/irq.h>
43 44
44#include "mlx4_en.h" 45#include "mlx4_en.h"
45 46
@@ -782,6 +783,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
782 PKT_HASH_TYPE_L3); 783 PKT_HASH_TYPE_L3);
783 784
784 skb_record_rx_queue(gro_skb, cq->ring); 785 skb_record_rx_queue(gro_skb, cq->ring);
786 skb_mark_napi_id(gro_skb, &cq->napi);
785 787
786 if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { 788 if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
787 timestamp = mlx4_en_get_cqe_ts(cqe); 789 timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -896,16 +898,25 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
896 898
897 /* If we used up all the quota - we're probably not done yet... */ 899 /* If we used up all the quota - we're probably not done yet... */
898 if (done == budget) { 900 if (done == budget) {
901 int cpu_curr;
902 const struct cpumask *aff;
903
899 INC_PERF_COUNTER(priv->pstats.napi_quota); 904 INC_PERF_COUNTER(priv->pstats.napi_quota);
900 if (unlikely(cq->mcq.irq_affinity_change)) { 905
901 cq->mcq.irq_affinity_change = false; 906 cpu_curr = smp_processor_id();
907 aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
908
909 if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
910 /* Current cpu is not according to smp_irq_affinity -
911 * probably affinity changed. need to stop this NAPI
912 * poll, and restart it on the right CPU
913 */
902 napi_complete(napi); 914 napi_complete(napi);
903 mlx4_en_arm_cq(priv, cq); 915 mlx4_en_arm_cq(priv, cq);
904 return 0; 916 return 0;
905 } 917 }
906 } else { 918 } else {
907 /* Done for now */ 919 /* Done for now */
908 cq->mcq.irq_affinity_change = false;
909 napi_complete(napi); 920 napi_complete(napi);
910 mlx4_en_arm_cq(priv, cq); 921 mlx4_en_arm_cq(priv, cq);
911 } 922 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8be7483f8236..5045bab59633 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -351,9 +351,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
351 return cnt; 351 return cnt;
352} 352}
353 353
354static int mlx4_en_process_tx_cq(struct net_device *dev, 354static bool mlx4_en_process_tx_cq(struct net_device *dev,
355 struct mlx4_en_cq *cq, 355 struct mlx4_en_cq *cq)
356 int budget)
357{ 356{
358 struct mlx4_en_priv *priv = netdev_priv(dev); 357 struct mlx4_en_priv *priv = netdev_priv(dev);
359 struct mlx4_cq *mcq = &cq->mcq; 358 struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +371,10 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
372 int factor = priv->cqe_factor; 371 int factor = priv->cqe_factor;
373 u64 timestamp = 0; 372 u64 timestamp = 0;
374 int done = 0; 373 int done = 0;
374 int budget = priv->tx_work_limit;
375 375
376 if (!priv->port_up) 376 if (!priv->port_up)
377 return 0; 377 return true;
378 378
379 index = cons_index & size_mask; 379 index = cons_index & size_mask;
380 cqe = &buf[(index << factor) + factor]; 380 cqe = &buf[(index << factor) + factor];
@@ -447,7 +447,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
447 netif_tx_wake_queue(ring->tx_queue); 447 netif_tx_wake_queue(ring->tx_queue);
448 ring->wake_queue++; 448 ring->wake_queue++;
449 } 449 }
450 return done; 450 return done < budget;
451} 451}
452 452
453void mlx4_en_tx_irq(struct mlx4_cq *mcq) 453void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -467,24 +467,16 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
467 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); 467 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
468 struct net_device *dev = cq->dev; 468 struct net_device *dev = cq->dev;
469 struct mlx4_en_priv *priv = netdev_priv(dev); 469 struct mlx4_en_priv *priv = netdev_priv(dev);
470 int done; 470 int clean_complete;
471 471
472 done = mlx4_en_process_tx_cq(dev, cq, budget); 472 clean_complete = mlx4_en_process_tx_cq(dev, cq);
473 if (!clean_complete)
474 return budget;
473 475
474 /* If we used up all the quota - we're probably not done yet... */ 476 napi_complete(napi);
475 if (done < budget) { 477 mlx4_en_arm_cq(priv, cq);
476 /* Done for now */ 478
477 cq->mcq.irq_affinity_change = false; 479 return 0;
478 napi_complete(napi);
479 mlx4_en_arm_cq(priv, cq);
480 return done;
481 } else if (unlikely(cq->mcq.irq_affinity_change)) {
482 cq->mcq.irq_affinity_change = false;
483 napi_complete(napi);
484 mlx4_en_arm_cq(priv, cq);
485 return 0;
486 }
487 return budget;
488} 480}
489 481
490static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, 482static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d954ec1eac17..2a004b347e1d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -53,11 +53,6 @@ enum {
53 MLX4_EQ_ENTRY_SIZE = 0x20 53 MLX4_EQ_ENTRY_SIZE = 0x20
54}; 54};
55 55
56struct mlx4_irq_notify {
57 void *arg;
58 struct irq_affinity_notify notify;
59};
60
61#define MLX4_EQ_STATUS_OK ( 0 << 28) 56#define MLX4_EQ_STATUS_OK ( 0 << 28)
62#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) 57#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
63#define MLX4_EQ_OWNER_SW ( 0 << 24) 58#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -1088,57 +1083,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
1088 iounmap(priv->clr_base); 1083 iounmap(priv->clr_base);
1089} 1084}
1090 1085
1091static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
1092 const cpumask_t *mask)
1093{
1094 struct mlx4_irq_notify *n = container_of(notify,
1095 struct mlx4_irq_notify,
1096 notify);
1097 struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
1098 struct radix_tree_iter iter;
1099 void **slot;
1100
1101 radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
1102 struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
1103
1104 if (cq->irq == notify->irq)
1105 cq->irq_affinity_change = true;
1106 }
1107}
1108
1109static void mlx4_release_irq_notifier(struct kref *ref)
1110{
1111 struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
1112 notify.kref);
1113 kfree(n);
1114}
1115
1116static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
1117 struct mlx4_dev *dev, int irq)
1118{
1119 struct mlx4_irq_notify *irq_notifier = NULL;
1120 int err = 0;
1121
1122 irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
1123 if (!irq_notifier) {
1124 mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
1125 irq);
1126 return;
1127 }
1128
1129 irq_notifier->notify.irq = irq;
1130 irq_notifier->notify.notify = mlx4_irq_notifier_notify;
1131 irq_notifier->notify.release = mlx4_release_irq_notifier;
1132 irq_notifier->arg = priv;
1133 err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
1134 if (err) {
1135 kfree(irq_notifier);
1136 irq_notifier = NULL;
1137 mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
1138 }
1139}
1140
1141
1142int mlx4_alloc_eq_table(struct mlx4_dev *dev) 1086int mlx4_alloc_eq_table(struct mlx4_dev *dev)
1143{ 1087{
1144 struct mlx4_priv *priv = mlx4_priv(dev); 1088 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1409,8 +1353,6 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
1409 continue; 1353 continue;
1410 /*we dont want to break here*/ 1354 /*we dont want to break here*/
1411 } 1355 }
1412 mlx4_assign_irq_notifier(priv, dev,
1413 priv->eq_table.eq[vec].irq);
1414 1356
1415 eq_set_ci(&priv->eq_table.eq[vec], 1); 1357 eq_set_ci(&priv->eq_table.eq[vec], 1);
1416 } 1358 }
@@ -1427,6 +1369,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
1427} 1369}
1428EXPORT_SYMBOL(mlx4_assign_eq); 1370EXPORT_SYMBOL(mlx4_assign_eq);
1429 1371
1372int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
1373{
1374 struct mlx4_priv *priv = mlx4_priv(dev);
1375
1376 return priv->eq_table.eq[vec].irq;
1377}
1378EXPORT_SYMBOL(mlx4_eq_get_irq);
1379
1430void mlx4_release_eq(struct mlx4_dev *dev, int vec) 1380void mlx4_release_eq(struct mlx4_dev *dev, int vec)
1431{ 1381{
1432 struct mlx4_priv *priv = mlx4_priv(dev); 1382 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1438,9 +1388,6 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
1438 Belonging to a legacy EQ*/ 1388 Belonging to a legacy EQ*/
1439 mutex_lock(&priv->msix_ctl.pool_lock); 1389 mutex_lock(&priv->msix_ctl.pool_lock);
1440 if (priv->msix_ctl.pool_bm & 1ULL << i) { 1390 if (priv->msix_ctl.pool_bm & 1ULL << i) {
1441 irq_set_affinity_notifier(
1442 priv->eq_table.eq[vec].irq,
1443 NULL);
1444 free_irq(priv->eq_table.eq[vec].irq, 1391 free_irq(priv->eq_table.eq[vec].irq,
1445 &priv->eq_table.eq[vec]); 1392 &priv->eq_table.eq[vec]);
1446 priv->msix_ctl.pool_bm &= ~(1ULL << i); 1393 priv->msix_ctl.pool_bm &= ~(1ULL << i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 0e15295bedd6..d72a5a894fc6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -126,6 +126,8 @@ enum {
126#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \ 126#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
127 MLX4_EN_NUM_UP) 127 MLX4_EN_NUM_UP)
128 128
129#define MLX4_EN_DEFAULT_TX_WORK 256
130
129/* Target number of packets to coalesce with interrupt moderation */ 131/* Target number of packets to coalesce with interrupt moderation */
130#define MLX4_EN_RX_COAL_TARGET 44 132#define MLX4_EN_RX_COAL_TARGET 44
131#define MLX4_EN_RX_COAL_TIME 0x10 133#define MLX4_EN_RX_COAL_TIME 0x10
@@ -343,6 +345,7 @@ struct mlx4_en_cq {
343#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) 345#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
344 spinlock_t poll_lock; /* protects from LLS/napi conflicts */ 346 spinlock_t poll_lock; /* protects from LLS/napi conflicts */
345#endif /* CONFIG_NET_RX_BUSY_POLL */ 347#endif /* CONFIG_NET_RX_BUSY_POLL */
348 struct irq_desc *irq_desc;
346}; 349};
347 350
348struct mlx4_en_port_profile { 351struct mlx4_en_port_profile {
@@ -542,6 +545,7 @@ struct mlx4_en_priv {
542 __be32 ctrl_flags; 545 __be32 ctrl_flags;
543 u32 flags; 546 u32 flags;
544 u8 num_tx_rings_p_up; 547 u8 num_tx_rings_p_up;
548 u32 tx_work_limit;
545 u32 tx_ring_num; 549 u32 tx_ring_num;
546 u32 rx_ring_num; 550 u32 rx_ring_num;
547 u32 rx_skb_size; 551 u32 rx_skb_size;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index ba0401d4af50..184c3615f479 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -94,6 +94,11 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
94 write_lock_irq(&table->lock); 94 write_lock_irq(&table->lock);
95 err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr); 95 err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
96 write_unlock_irq(&table->lock); 96 write_unlock_irq(&table->lock);
97 if (err) {
98 mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
99 mlx5_base_mkey(mr->key), err);
100 mlx5_core_destroy_mkey(dev, mr);
101 }
97 102
98 return err; 103 return err;
99} 104}
@@ -104,12 +109,22 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
104 struct mlx5_mr_table *table = &dev->priv.mr_table; 109 struct mlx5_mr_table *table = &dev->priv.mr_table;
105 struct mlx5_destroy_mkey_mbox_in in; 110 struct mlx5_destroy_mkey_mbox_in in;
106 struct mlx5_destroy_mkey_mbox_out out; 111 struct mlx5_destroy_mkey_mbox_out out;
112 struct mlx5_core_mr *deleted_mr;
107 unsigned long flags; 113 unsigned long flags;
108 int err; 114 int err;
109 115
110 memset(&in, 0, sizeof(in)); 116 memset(&in, 0, sizeof(in));
111 memset(&out, 0, sizeof(out)); 117 memset(&out, 0, sizeof(out));
112 118
119 write_lock_irqsave(&table->lock, flags);
120 deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
121 write_unlock_irqrestore(&table->lock, flags);
122 if (!deleted_mr) {
123 mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n",
124 mlx5_base_mkey(mr->key));
125 return -ENOENT;
126 }
127
113 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY); 128 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
114 in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); 129 in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
115 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 130 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
@@ -119,10 +134,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
119 if (out.hdr.status) 134 if (out.hdr.status)
120 return mlx5_cmd_status_to_err(&out.hdr); 135 return mlx5_cmd_status_to_err(&out.hdr);
121 136
122 write_lock_irqsave(&table->lock, flags);
123 radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
124 write_unlock_irqrestore(&table->lock, flags);
125
126 return err; 137 return err;
127} 138}
128EXPORT_SYMBOL(mlx5_core_destroy_mkey); 139EXPORT_SYMBOL(mlx5_core_destroy_mkey);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index be425ad5e824..61623e9af574 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -538,6 +538,7 @@ enum rtl_register_content {
538 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ 538 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
539 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ 539 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
540 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ 540 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
541 Rdy_to_L23 = (1 << 1), /* L23 Enable */
541 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ 542 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
542 543
543 /* Config4 register */ 544 /* Config4 register */
@@ -4239,6 +4240,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4239 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); 4240 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4240 break; 4241 break;
4241 case RTL_GIGA_MAC_VER_40: 4242 case RTL_GIGA_MAC_VER_40:
4243 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
4244 break;
4242 case RTL_GIGA_MAC_VER_41: 4245 case RTL_GIGA_MAC_VER_41:
4243 case RTL_GIGA_MAC_VER_42: 4246 case RTL_GIGA_MAC_VER_42:
4244 case RTL_GIGA_MAC_VER_43: 4247 case RTL_GIGA_MAC_VER_43:
@@ -4897,6 +4900,21 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
4897 PCI_EXP_LNKCTL_CLKREQ_EN); 4900 PCI_EXP_LNKCTL_CLKREQ_EN);
4898} 4901}
4899 4902
4903static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
4904{
4905 void __iomem *ioaddr = tp->mmio_addr;
4906 u8 data;
4907
4908 data = RTL_R8(Config3);
4909
4910 if (enable)
4911 data |= Rdy_to_L23;
4912 else
4913 data &= ~Rdy_to_L23;
4914
4915 RTL_W8(Config3, data);
4916}
4917
4900#define R8168_CPCMD_QUIRK_MASK (\ 4918#define R8168_CPCMD_QUIRK_MASK (\
4901 EnableBist | \ 4919 EnableBist | \
4902 Mac_dbgo_oe | \ 4920 Mac_dbgo_oe | \
@@ -5246,6 +5264,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
5246 }; 5264 };
5247 5265
5248 rtl_hw_start_8168f(tp); 5266 rtl_hw_start_8168f(tp);
5267 rtl_pcie_state_l2l3_enable(tp, false);
5249 5268
5250 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); 5269 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5251 5270
@@ -5284,6 +5303,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5284 5303
5285 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); 5304 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
5286 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); 5305 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
5306
5307 rtl_pcie_state_l2l3_enable(tp, false);
5287} 5308}
5288 5309
5289static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) 5310static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5536,6 +5557,8 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5536 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 5557 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5537 5558
5538 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); 5559 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
5560
5561 rtl_pcie_state_l2l3_enable(tp, false);
5539} 5562}
5540 5563
5541static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) 5564static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
@@ -5571,6 +5594,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
5571 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5594 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5572 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5595 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5573 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); 5596 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
5597
5598 rtl_pcie_state_l2l3_enable(tp, false);
5574} 5599}
5575 5600
5576static void rtl_hw_start_8106(struct rtl8169_private *tp) 5601static void rtl_hw_start_8106(struct rtl8169_private *tp)
@@ -5583,6 +5608,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
5583 RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); 5608 RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
5584 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); 5609 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5585 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); 5610 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
5611
5612 rtl_pcie_state_l2l3_enable(tp, false);
5586} 5613}
5587 5614
5588static void rtl_hw_start_8101(struct net_device *dev) 5615static void rtl_hw_start_8101(struct net_device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b3e148ef5683..9d3748361a1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -320,11 +320,8 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
320 320
321static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart) 321static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
322{ 322{
323 u32 value;
324
325 value = readl(ioaddr + GMAC_AN_CTRL);
326 /* auto negotiation enable and External Loopback enable */ 323 /* auto negotiation enable and External Loopback enable */
327 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE; 324 u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
328 325
329 if (restart) 326 if (restart)
330 value |= GMAC_AN_CTRL_RAN; 327 value |= GMAC_AN_CTRL_RAN;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 7e6628a91514..1e2bcf5f89e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -145,7 +145,7 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
145 x->rx_msg_type_delay_req++; 145 x->rx_msg_type_delay_req++;
146 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP) 146 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
147 x->rx_msg_type_delay_resp++; 147 x->rx_msg_type_delay_resp++;
148 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ) 148 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
149 x->rx_msg_type_pdelay_req++; 149 x->rx_msg_type_pdelay_req++;
150 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP) 150 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
151 x->rx_msg_type_pdelay_resp++; 151 x->rx_msg_type_pdelay_resp++;
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1c24a8f368bd..d813bfb1a847 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -610,6 +610,13 @@ static int __vnet_tx_trigger(struct vnet_port *port)
610 return err; 610 return err;
611} 611}
612 612
613static inline bool port_is_up(struct vnet_port *vnet)
614{
615 struct vio_driver_state *vio = &vnet->vio;
616
617 return !!(vio->hs_state & VIO_HS_COMPLETE);
618}
619
613struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) 620struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
614{ 621{
615 unsigned int hash = vnet_hashfn(skb->data); 622 unsigned int hash = vnet_hashfn(skb->data);
@@ -617,14 +624,19 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
617 struct vnet_port *port; 624 struct vnet_port *port;
618 625
619 hlist_for_each_entry(port, hp, hash) { 626 hlist_for_each_entry(port, hp, hash) {
627 if (!port_is_up(port))
628 continue;
620 if (ether_addr_equal(port->raddr, skb->data)) 629 if (ether_addr_equal(port->raddr, skb->data))
621 return port; 630 return port;
622 } 631 }
623 port = NULL; 632 list_for_each_entry(port, &vp->port_list, list) {
624 if (!list_empty(&vp->port_list)) 633 if (!port->switch_port)
625 port = list_entry(vp->port_list.next, struct vnet_port, list); 634 continue;
626 635 if (!port_is_up(port))
627 return port; 636 continue;
637 return port;
638 }
639 return NULL;
628} 640}
629 641
630struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb) 642struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
@@ -1083,6 +1095,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
1083 return vp; 1095 return vp;
1084} 1096}
1085 1097
1098static void vnet_cleanup(void)
1099{
1100 struct vnet *vp;
1101 struct net_device *dev;
1102
1103 mutex_lock(&vnet_list_mutex);
1104 while (!list_empty(&vnet_list)) {
1105 vp = list_first_entry(&vnet_list, struct vnet, list);
1106 list_del(&vp->list);
1107 dev = vp->dev;
1108 /* vio_unregister_driver() should have cleaned up port_list */
1109 BUG_ON(!list_empty(&vp->port_list));
1110 unregister_netdev(dev);
1111 free_netdev(dev);
1112 }
1113 mutex_unlock(&vnet_list_mutex);
1114}
1115
1086static const char *local_mac_prop = "local-mac-address"; 1116static const char *local_mac_prop = "local-mac-address";
1087 1117
1088static struct vnet *vnet_find_parent(struct mdesc_handle *hp, 1118static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
@@ -1240,7 +1270,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
1240 1270
1241 kfree(port); 1271 kfree(port);
1242 1272
1243 unregister_netdev(vp->dev);
1244 } 1273 }
1245 return 0; 1274 return 0;
1246} 1275}
@@ -1268,6 +1297,7 @@ static int __init vnet_init(void)
1268static void __exit vnet_exit(void) 1297static void __exit vnet_exit(void)
1269{ 1298{
1270 vio_unregister_driver(&vnet_port_driver); 1299 vio_unregister_driver(&vnet_port_driver);
1300 vnet_cleanup();
1271} 1301}
1272 1302
1273module_init(vnet_init); 1303module_init(vnet_init);
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index eb78203cd58e..2aa57270838f 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -291,7 +291,11 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
291 291
292static int dfx_rcv_init(DFX_board_t *bp, int get_buffers); 292static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
293static void dfx_rcv_queue_process(DFX_board_t *bp); 293static void dfx_rcv_queue_process(DFX_board_t *bp);
294#ifdef DYNAMIC_BUFFERS
294static void dfx_rcv_flush(DFX_board_t *bp); 295static void dfx_rcv_flush(DFX_board_t *bp);
296#else
297static inline void dfx_rcv_flush(DFX_board_t *bp) {}
298#endif
295 299
296static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb, 300static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
297 struct net_device *dev); 301 struct net_device *dev);
@@ -2849,7 +2853,7 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2849 * Align an sk_buff to a boundary power of 2 2853 * Align an sk_buff to a boundary power of 2
2850 * 2854 *
2851 */ 2855 */
2852 2856#ifdef DYNAMIC_BUFFERS
2853static void my_skb_align(struct sk_buff *skb, int n) 2857static void my_skb_align(struct sk_buff *skb, int n)
2854{ 2858{
2855 unsigned long x = (unsigned long)skb->data; 2859 unsigned long x = (unsigned long)skb->data;
@@ -2859,7 +2863,7 @@ static void my_skb_align(struct sk_buff *skb, int n)
2859 2863
2860 skb_reserve(skb, v - x); 2864 skb_reserve(skb, v - x);
2861} 2865}
2862 2866#endif
2863 2867
2864/* 2868/*
2865 * ================ 2869 * ================
@@ -3074,10 +3078,7 @@ static void dfx_rcv_queue_process(
3074 break; 3078 break;
3075 } 3079 }
3076 else { 3080 else {
3077#ifndef DYNAMIC_BUFFERS 3081 if (!rx_in_place) {
3078 if (! rx_in_place)
3079#endif
3080 {
3081 /* Receive buffer allocated, pass receive packet up */ 3082 /* Receive buffer allocated, pass receive packet up */
3082 3083
3083 skb_copy_to_linear_data(skb, 3084 skb_copy_to_linear_data(skb,
@@ -3453,10 +3454,6 @@ static void dfx_rcv_flush( DFX_board_t *bp )
3453 } 3454 }
3454 3455
3455 } 3456 }
3456#else
3457static inline void dfx_rcv_flush( DFX_board_t *bp )
3458{
3459}
3460#endif /* DYNAMIC_BUFFERS */ 3457#endif /* DYNAMIC_BUFFERS */
3461 3458
3462/* 3459/*
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4ed38eaecea8..d97d5f39a04e 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -378,8 +378,10 @@ static int netvsc_init_buf(struct hv_device *device)
378 378
379 net_device->send_section_map = 379 net_device->send_section_map =
380 kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL); 380 kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
381 if (net_device->send_section_map == NULL) 381 if (net_device->send_section_map == NULL) {
382 ret = -ENOMEM;
382 goto cleanup; 383 goto cleanup;
384 }
383 385
384 goto exit; 386 goto exit;
385 387
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 6a999e6814a0..9408157a246c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1323,15 +1323,15 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
1323{ 1323{
1324 struct dp83640_private *dp83640 = phydev->priv; 1324 struct dp83640_private *dp83640 = phydev->priv;
1325 1325
1326 if (!dp83640->hwts_rx_en)
1327 return false;
1328
1329 if (is_status_frame(skb, type)) { 1326 if (is_status_frame(skb, type)) {
1330 decode_status_frame(dp83640, skb); 1327 decode_status_frame(dp83640, skb);
1331 kfree_skb(skb); 1328 kfree_skb(skb);
1332 return true; 1329 return true;
1333 } 1330 }
1334 1331
1332 if (!dp83640->hwts_rx_en)
1333 return false;
1334
1335 SKB_PTP_TYPE(skb) = type; 1335 SKB_PTP_TYPE(skb) = type;
1336 skb_queue_tail(&dp83640->rx_queue, skb); 1336 skb_queue_tail(&dp83640->rx_queue, skb);
1337 schedule_work(&dp83640->ts_work); 1337 schedule_work(&dp83640->ts_work);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2e58aa54484c..203651ebccb0 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -187,6 +187,50 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
187 return d ? to_mii_bus(d) : NULL; 187 return d ? to_mii_bus(d) : NULL;
188} 188}
189EXPORT_SYMBOL(of_mdio_find_bus); 189EXPORT_SYMBOL(of_mdio_find_bus);
190
191/* Walk the list of subnodes of a mdio bus and look for a node that matches the
192 * phy's address with its 'reg' property. If found, set the of_node pointer for
193 * the phy. This allows auto-probed pyh devices to be supplied with information
194 * passed in via DT.
195 */
196static void of_mdiobus_link_phydev(struct mii_bus *mdio,
197 struct phy_device *phydev)
198{
199 struct device *dev = &phydev->dev;
200 struct device_node *child;
201
202 if (dev->of_node || !mdio->dev.of_node)
203 return;
204
205 for_each_available_child_of_node(mdio->dev.of_node, child) {
206 int addr;
207 int ret;
208
209 ret = of_property_read_u32(child, "reg", &addr);
210 if (ret < 0) {
211 dev_err(dev, "%s has invalid PHY address\n",
212 child->full_name);
213 continue;
214 }
215
216 /* A PHY must have a reg property in the range [0-31] */
217 if (addr >= PHY_MAX_ADDR) {
218 dev_err(dev, "%s PHY address %i is too large\n",
219 child->full_name, addr);
220 continue;
221 }
222
223 if (addr == phydev->addr) {
224 dev->of_node = child;
225 return;
226 }
227 }
228}
229#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
230static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
231 struct phy_device *phydev)
232{
233}
190#endif 234#endif
191 235
192/** 236/**
@@ -211,6 +255,7 @@ int mdiobus_register(struct mii_bus *bus)
211 255
212 bus->dev.parent = bus->parent; 256 bus->dev.parent = bus->parent;
213 bus->dev.class = &mdio_bus_class; 257 bus->dev.class = &mdio_bus_class;
258 bus->dev.driver = bus->parent->driver;
214 bus->dev.groups = NULL; 259 bus->dev.groups = NULL;
215 dev_set_name(&bus->dev, "%s", bus->id); 260 dev_set_name(&bus->dev, "%s", bus->id);
216 261
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 35d753d22f78..22c57be4dfa0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -355,7 +355,7 @@ int phy_device_register(struct phy_device *phydev)
355 phydev->bus->phy_map[phydev->addr] = phydev; 355 phydev->bus->phy_map[phydev->addr] = phydev;
356 356
357 /* Run all of the fixups for this PHY */ 357 /* Run all of the fixups for this PHY */
358 err = phy_init_hw(phydev); 358 err = phy_scan_fixups(phydev);
359 if (err) { 359 if (err) {
360 pr_err("PHY %d failed to initialize\n", phydev->addr); 360 pr_err("PHY %d failed to initialize\n", phydev->addr);
361 goto out; 361 goto out;
@@ -575,6 +575,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
575 u32 flags, phy_interface_t interface) 575 u32 flags, phy_interface_t interface)
576{ 576{
577 struct device *d = &phydev->dev; 577 struct device *d = &phydev->dev;
578 struct module *bus_module;
578 int err; 579 int err;
579 580
580 /* Assume that if there is no driver, that it doesn't 581 /* Assume that if there is no driver, that it doesn't
@@ -599,6 +600,14 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
599 return -EBUSY; 600 return -EBUSY;
600 } 601 }
601 602
603 /* Increment the bus module reference count */
604 bus_module = phydev->bus->dev.driver ?
605 phydev->bus->dev.driver->owner : NULL;
606 if (!try_module_get(bus_module)) {
607 dev_err(&dev->dev, "failed to get the bus module\n");
608 return -EIO;
609 }
610
602 phydev->attached_dev = dev; 611 phydev->attached_dev = dev;
603 dev->phydev = phydev; 612 dev->phydev = phydev;
604 613
@@ -664,6 +673,10 @@ EXPORT_SYMBOL(phy_attach);
664void phy_detach(struct phy_device *phydev) 673void phy_detach(struct phy_device *phydev)
665{ 674{
666 int i; 675 int i;
676
677 if (phydev->bus->dev.driver)
678 module_put(phydev->bus->dev.driver->owner);
679
667 phydev->attached_dev->phydev = NULL; 680 phydev->attached_dev->phydev = NULL;
668 phydev->attached_dev = NULL; 681 phydev->attached_dev = NULL;
669 phy_suspend(phydev); 682 phy_suspend(phydev);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 91d6c1272fcf..d5b77ef3a210 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -539,7 +539,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
539{ 539{
540 struct sock_fprog uprog; 540 struct sock_fprog uprog;
541 struct sock_filter *code = NULL; 541 struct sock_filter *code = NULL;
542 int len, err; 542 int len;
543 543
544 if (copy_from_user(&uprog, arg, sizeof(uprog))) 544 if (copy_from_user(&uprog, arg, sizeof(uprog)))
545 return -EFAULT; 545 return -EFAULT;
@@ -554,12 +554,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
554 if (IS_ERR(code)) 554 if (IS_ERR(code))
555 return PTR_ERR(code); 555 return PTR_ERR(code);
556 556
557 err = sk_chk_filter(code, uprog.len);
558 if (err) {
559 kfree(code);
560 return err;
561 }
562
563 *p = code; 557 *p = code;
564 return uprog.len; 558 return uprog.len;
565} 559}
@@ -763,10 +757,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
763 }; 757 };
764 758
765 ppp_lock(ppp); 759 ppp_lock(ppp);
766 if (ppp->pass_filter) 760 if (ppp->pass_filter) {
767 sk_unattached_filter_destroy(ppp->pass_filter); 761 sk_unattached_filter_destroy(ppp->pass_filter);
768 err = sk_unattached_filter_create(&ppp->pass_filter, 762 ppp->pass_filter = NULL;
769 &fprog); 763 }
764 if (fprog.filter != NULL)
765 err = sk_unattached_filter_create(&ppp->pass_filter,
766 &fprog);
767 else
768 err = 0;
770 kfree(code); 769 kfree(code);
771 ppp_unlock(ppp); 770 ppp_unlock(ppp);
772 } 771 }
@@ -784,10 +783,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
784 }; 783 };
785 784
786 ppp_lock(ppp); 785 ppp_lock(ppp);
787 if (ppp->active_filter) 786 if (ppp->active_filter) {
788 sk_unattached_filter_destroy(ppp->active_filter); 787 sk_unattached_filter_destroy(ppp->active_filter);
789 err = sk_unattached_filter_create(&ppp->active_filter, 788 ppp->active_filter = NULL;
790 &fprog); 789 }
790 if (fprog.filter != NULL)
791 err = sk_unattached_filter_create(&ppp->active_filter,
792 &fprog);
793 else
794 err = 0;
791 kfree(code); 795 kfree(code);
792 ppp_unlock(ppp); 796 ppp_unlock(ppp);
793 } 797 }
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2ea7efd11857..6c9c16d76935 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
675 po->chan.hdrlen = (sizeof(struct pppoe_hdr) + 675 po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
676 dev->hard_header_len); 676 dev->hard_header_len);
677 677
678 po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr); 678 po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
679 po->chan.private = sk; 679 po->chan.private = sk;
680 po->chan.ops = &pppoe_chan_ops; 680 po->chan.ops = &pppoe_chan_ops;
681 681
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9ea4bfe5d318..2a32d9167d3b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -341,6 +341,22 @@ next_desc:
341 usb_driver_release_interface(driver, info->data); 341 usb_driver_release_interface(driver, info->data);
342 return -ENODEV; 342 return -ENODEV;
343 } 343 }
344
345 /* Some devices don't initialise properly. In particular
346 * the packet filter is not reset. There are devices that
347 * don't do reset all the way. So the packet filter should
348 * be set to a sane initial value.
349 */
350 usb_control_msg(dev->udev,
351 usb_sndctrlpipe(dev->udev, 0),
352 USB_CDC_SET_ETHERNET_PACKET_FILTER,
353 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
354 USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
355 intf->cur_altsetting->desc.bInterfaceNumber,
356 NULL,
357 0,
358 USB_CTRL_SET_TIMEOUT
359 );
344 return 0; 360 return 0;
345 361
346bad_desc: 362bad_desc:
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index a3a05869309d..a4272ed62da8 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -258,10 +258,8 @@ struct hso_serial {
258 * so as not to drop characters on the floor. 258 * so as not to drop characters on the floor.
259 */ 259 */
260 int curr_rx_urb_idx; 260 int curr_rx_urb_idx;
261 u16 curr_rx_urb_offset;
262 u8 rx_urb_filled[MAX_RX_URBS]; 261 u8 rx_urb_filled[MAX_RX_URBS];
263 struct tasklet_struct unthrottle_tasklet; 262 struct tasklet_struct unthrottle_tasklet;
264 struct work_struct retry_unthrottle_workqueue;
265}; 263};
266 264
267struct hso_device { 265struct hso_device {
@@ -1252,14 +1250,6 @@ static void hso_unthrottle(struct tty_struct *tty)
1252 tasklet_hi_schedule(&serial->unthrottle_tasklet); 1250 tasklet_hi_schedule(&serial->unthrottle_tasklet);
1253} 1251}
1254 1252
1255static void hso_unthrottle_workfunc(struct work_struct *work)
1256{
1257 struct hso_serial *serial =
1258 container_of(work, struct hso_serial,
1259 retry_unthrottle_workqueue);
1260 hso_unthrottle_tasklet(serial);
1261}
1262
1263/* open the requested serial port */ 1253/* open the requested serial port */
1264static int hso_serial_open(struct tty_struct *tty, struct file *filp) 1254static int hso_serial_open(struct tty_struct *tty, struct file *filp)
1265{ 1255{
@@ -1295,8 +1285,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
1295 tasklet_init(&serial->unthrottle_tasklet, 1285 tasklet_init(&serial->unthrottle_tasklet,
1296 (void (*)(unsigned long))hso_unthrottle_tasklet, 1286 (void (*)(unsigned long))hso_unthrottle_tasklet,
1297 (unsigned long)serial); 1287 (unsigned long)serial);
1298 INIT_WORK(&serial->retry_unthrottle_workqueue,
1299 hso_unthrottle_workfunc);
1300 result = hso_start_serial_device(serial->parent, GFP_KERNEL); 1288 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
1301 if (result) { 1289 if (result) {
1302 hso_stop_serial_device(serial->parent); 1290 hso_stop_serial_device(serial->parent);
@@ -1345,7 +1333,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
1345 if (!usb_gone) 1333 if (!usb_gone)
1346 hso_stop_serial_device(serial->parent); 1334 hso_stop_serial_device(serial->parent);
1347 tasklet_kill(&serial->unthrottle_tasklet); 1335 tasklet_kill(&serial->unthrottle_tasklet);
1348 cancel_work_sync(&serial->retry_unthrottle_workqueue);
1349 } 1336 }
1350 1337
1351 if (!usb_gone) 1338 if (!usb_gone)
@@ -2013,8 +2000,7 @@ static void ctrl_callback(struct urb *urb)
2013static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial) 2000static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
2014{ 2001{
2015 struct tty_struct *tty; 2002 struct tty_struct *tty;
2016 int write_length_remaining = 0; 2003 int count;
2017 int curr_write_len;
2018 2004
2019 /* Sanity check */ 2005 /* Sanity check */
2020 if (urb == NULL || serial == NULL) { 2006 if (urb == NULL || serial == NULL) {
@@ -2024,29 +2010,28 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
2024 2010
2025 tty = tty_port_tty_get(&serial->port); 2011 tty = tty_port_tty_get(&serial->port);
2026 2012
2013 if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
2014 tty_kref_put(tty);
2015 return -1;
2016 }
2017
2027 /* Push data to tty */ 2018 /* Push data to tty */
2028 write_length_remaining = urb->actual_length -
2029 serial->curr_rx_urb_offset;
2030 D1("data to push to tty"); 2019 D1("data to push to tty");
2031 while (write_length_remaining) { 2020 count = tty_buffer_request_room(&serial->port, urb->actual_length);
2032 if (tty && test_bit(TTY_THROTTLED, &tty->flags)) { 2021 if (count >= urb->actual_length) {
2033 tty_kref_put(tty); 2022 tty_insert_flip_string(&serial->port, urb->transfer_buffer,
2034 return -1; 2023 urb->actual_length);
2035 }
2036 curr_write_len = tty_insert_flip_string(&serial->port,
2037 urb->transfer_buffer + serial->curr_rx_urb_offset,
2038 write_length_remaining);
2039 serial->curr_rx_urb_offset += curr_write_len;
2040 write_length_remaining -= curr_write_len;
2041 tty_flip_buffer_push(&serial->port); 2024 tty_flip_buffer_push(&serial->port);
2025 } else {
2026 dev_warn(&serial->parent->usb->dev,
2027 "dropping data, %d bytes lost\n", urb->actual_length);
2042 } 2028 }
2029
2043 tty_kref_put(tty); 2030 tty_kref_put(tty);
2044 2031
2045 if (write_length_remaining == 0) { 2032 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
2046 serial->curr_rx_urb_offset = 0; 2033
2047 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; 2034 return 0;
2048 }
2049 return write_length_remaining;
2050} 2035}
2051 2036
2052 2037
@@ -2217,7 +2202,6 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
2217 } 2202 }
2218 } 2203 }
2219 serial->curr_rx_urb_idx = 0; 2204 serial->curr_rx_urb_idx = 0;
2220 serial->curr_rx_urb_offset = 0;
2221 2205
2222 if (serial->tx_urb) 2206 if (serial->tx_urb)
2223 usb_kill_urb(serial->tx_urb); 2207 usb_kill_urb(serial->tx_urb);
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 5d95a13dbe2a..735f7dadb9a0 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -194,6 +194,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = {
194 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76), 194 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
195 .driver_info = (unsigned long)&huawei_cdc_ncm_info, 195 .driver_info = (unsigned long)&huawei_cdc_ncm_info,
196 }, 196 },
197 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
198 .driver_info = (unsigned long)&huawei_cdc_ncm_info,
199 },
197 200
198 /* Terminating entry */ 201 /* Terminating entry */
199 { 202 {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index cf62d7e8329f..22756db53dca 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -667,6 +667,7 @@ static const struct usb_device_id products[] = {
667 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, 667 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
668 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, 668 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
669 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 669 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
670 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
670 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 671 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
671 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 672 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
672 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ 673 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
@@ -741,6 +742,7 @@ static const struct usb_device_id products[] = {
741 {QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, 742 {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
742 {QMI_FIXED_INTF(0x19d2, 0x1425, 2)}, 743 {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
743 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ 744 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
745 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
744 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 746 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
745 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 747 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
746 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 748 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
@@ -756,6 +758,7 @@ static const struct usb_device_id products[] = {
756 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ 758 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
757 {QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */ 759 {QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */
758 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ 760 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
761 {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
759 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ 762 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
760 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 763 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
761 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 764 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 25431965a625..3eab74c7c554 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -282,7 +282,7 @@
282/* USB_DEV_STAT */ 282/* USB_DEV_STAT */
283#define STAT_SPEED_MASK 0x0006 283#define STAT_SPEED_MASK 0x0006
284#define STAT_SPEED_HIGH 0x0000 284#define STAT_SPEED_HIGH 0x0000
285#define STAT_SPEED_FULL 0x0001 285#define STAT_SPEED_FULL 0x0002
286 286
287/* USB_TX_AGG */ 287/* USB_TX_AGG */
288#define TX_AGG_MAX_THRESHOLD 0x03 288#define TX_AGG_MAX_THRESHOLD 0x03
@@ -1359,7 +1359,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
1359 struct sk_buff_head seg_list; 1359 struct sk_buff_head seg_list;
1360 struct sk_buff *segs, *nskb; 1360 struct sk_buff *segs, *nskb;
1361 1361
1362 features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO); 1362 features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1363 segs = skb_gso_segment(skb, features); 1363 segs = skb_gso_segment(skb, features);
1364 if (IS_ERR(segs) || !segs) 1364 if (IS_ERR(segs) || !segs)
1365 goto drop; 1365 goto drop;
@@ -2292,9 +2292,8 @@ static void r8152b_exit_oob(struct r8152 *tp)
2292 /* rx share fifo credit full threshold */ 2292 /* rx share fifo credit full threshold */
2293 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL); 2293 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
2294 2294
2295 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT); 2295 if (tp->udev->speed == USB_SPEED_FULL ||
2296 ocp_data &= STAT_SPEED_MASK; 2296 tp->udev->speed == USB_SPEED_LOW) {
2297 if (ocp_data == STAT_SPEED_FULL) {
2298 /* rx share fifo credit near full threshold */ 2297 /* rx share fifo credit near full threshold */
2299 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, 2298 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
2300 RXFIFO_THR2_FULL); 2299 RXFIFO_THR2_FULL);
@@ -3204,8 +3203,13 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev,
3204 struct r8152 *tp = netdev_priv(dev); 3203 struct r8152 *tp = netdev_priv(dev);
3205 struct tally_counter tally; 3204 struct tally_counter tally;
3206 3205
3206 if (usb_autopm_get_interface(tp->intf) < 0)
3207 return;
3208
3207 generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA); 3209 generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
3208 3210
3211 usb_autopm_put_interface(tp->intf);
3212
3209 data[0] = le64_to_cpu(tally.tx_packets); 3213 data[0] = le64_to_cpu(tally.tx_packets);
3210 data[1] = le64_to_cpu(tally.rx_packets); 3214 data[1] = le64_to_cpu(tally.rx_packets);
3211 data[2] = le64_to_cpu(tally.tx_errors); 3215 data[2] = le64_to_cpu(tally.tx_errors);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 424db65e4396..d07bf4cb893f 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1714,6 +1714,18 @@ static int smsc95xx_resume(struct usb_interface *intf)
1714 return ret; 1714 return ret;
1715} 1715}
1716 1716
1717static int smsc95xx_reset_resume(struct usb_interface *intf)
1718{
1719 struct usbnet *dev = usb_get_intfdata(intf);
1720 int ret;
1721
1722 ret = smsc95xx_reset(dev);
1723 if (ret < 0)
1724 return ret;
1725
1726 return smsc95xx_resume(intf);
1727}
1728
1717static void smsc95xx_rx_csum_offload(struct sk_buff *skb) 1729static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
1718{ 1730{
1719 skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2); 1731 skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -2004,7 +2016,7 @@ static struct usb_driver smsc95xx_driver = {
2004 .probe = usbnet_probe, 2016 .probe = usbnet_probe,
2005 .suspend = smsc95xx_suspend, 2017 .suspend = smsc95xx_suspend,
2006 .resume = smsc95xx_resume, 2018 .resume = smsc95xx_resume,
2007 .reset_resume = smsc95xx_resume, 2019 .reset_resume = smsc95xx_reset_resume,
2008 .disconnect = usbnet_disconnect, 2020 .disconnect = usbnet_disconnect,
2009 .disable_hub_initiated_lpm = 1, 2021 .disable_hub_initiated_lpm = 1,
2010 .supports_autosuspend = 1, 2022 .supports_autosuspend = 1,
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ade33ef82823..9f79192c9aa0 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -339,7 +339,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
339 ndm->ndm_state = fdb->state; 339 ndm->ndm_state = fdb->state;
340 ndm->ndm_ifindex = vxlan->dev->ifindex; 340 ndm->ndm_ifindex = vxlan->dev->ifindex;
341 ndm->ndm_flags = fdb->flags; 341 ndm->ndm_flags = fdb->flags;
342 ndm->ndm_type = NDA_DST; 342 ndm->ndm_type = RTN_UNICAST;
343 343
344 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr)) 344 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
345 goto nla_put_failure; 345 goto nla_put_failure;
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 93ace042d0aa..1f041271f7fe 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2363,7 +2363,7 @@ static char *type_strings[] = {
2363 "FarSync TE1" 2363 "FarSync TE1"
2364}; 2364};
2365 2365
2366static void 2366static int
2367fst_init_card(struct fst_card_info *card) 2367fst_init_card(struct fst_card_info *card)
2368{ 2368{
2369 int i; 2369 int i;
@@ -2374,24 +2374,21 @@ fst_init_card(struct fst_card_info *card)
2374 * we'll have to revise it in some way then. 2374 * we'll have to revise it in some way then.
2375 */ 2375 */
2376 for (i = 0; i < card->nports; i++) { 2376 for (i = 0; i < card->nports; i++) {
2377 err = register_hdlc_device(card->ports[i].dev); 2377 err = register_hdlc_device(card->ports[i].dev);
2378 if (err < 0) { 2378 if (err < 0) {
2379 int j;
2380 pr_err("Cannot register HDLC device for port %d (errno %d)\n", 2379 pr_err("Cannot register HDLC device for port %d (errno %d)\n",
2381 i, -err); 2380 i, -err);
2382 for (j = i; j < card->nports; j++) { 2381 while (i--)
2383 free_netdev(card->ports[j].dev); 2382 unregister_hdlc_device(card->ports[i].dev);
2384 card->ports[j].dev = NULL; 2383 return err;
2385 } 2384 }
2386 card->nports = i;
2387 break;
2388 }
2389 } 2385 }
2390 2386
2391 pr_info("%s-%s: %s IRQ%d, %d ports\n", 2387 pr_info("%s-%s: %s IRQ%d, %d ports\n",
2392 port_to_dev(&card->ports[0])->name, 2388 port_to_dev(&card->ports[0])->name,
2393 port_to_dev(&card->ports[card->nports - 1])->name, 2389 port_to_dev(&card->ports[card->nports - 1])->name,
2394 type_strings[card->type], card->irq, card->nports); 2390 type_strings[card->type], card->irq, card->nports);
2391 return 0;
2395} 2392}
2396 2393
2397static const struct net_device_ops fst_ops = { 2394static const struct net_device_ops fst_ops = {
@@ -2447,15 +2444,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2447 /* Try to enable the device */ 2444 /* Try to enable the device */
2448 if ((err = pci_enable_device(pdev)) != 0) { 2445 if ((err = pci_enable_device(pdev)) != 0) {
2449 pr_err("Failed to enable card. Err %d\n", -err); 2446 pr_err("Failed to enable card. Err %d\n", -err);
2450 kfree(card); 2447 goto enable_fail;
2451 return err;
2452 } 2448 }
2453 2449
2454 if ((err = pci_request_regions(pdev, "FarSync")) !=0) { 2450 if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
2455 pr_err("Failed to allocate regions. Err %d\n", -err); 2451 pr_err("Failed to allocate regions. Err %d\n", -err);
2456 pci_disable_device(pdev); 2452 goto regions_fail;
2457 kfree(card);
2458 return err;
2459 } 2453 }
2460 2454
2461 /* Get virtual addresses of memory regions */ 2455 /* Get virtual addresses of memory regions */
@@ -2464,30 +2458,21 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2464 card->phys_ctlmem = pci_resource_start(pdev, 3); 2458 card->phys_ctlmem = pci_resource_start(pdev, 3);
2465 if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) { 2459 if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
2466 pr_err("Physical memory remap failed\n"); 2460 pr_err("Physical memory remap failed\n");
2467 pci_release_regions(pdev); 2461 err = -ENODEV;
2468 pci_disable_device(pdev); 2462 goto ioremap_physmem_fail;
2469 kfree(card);
2470 return -ENODEV;
2471 } 2463 }
2472 if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) { 2464 if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
2473 pr_err("Control memory remap failed\n"); 2465 pr_err("Control memory remap failed\n");
2474 pci_release_regions(pdev); 2466 err = -ENODEV;
2475 pci_disable_device(pdev); 2467 goto ioremap_ctlmem_fail;
2476 iounmap(card->mem);
2477 kfree(card);
2478 return -ENODEV;
2479 } 2468 }
2480 dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem); 2469 dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
2481 2470
2482 /* Register the interrupt handler */ 2471 /* Register the interrupt handler */
2483 if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) { 2472 if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
2484 pr_err("Unable to register interrupt %d\n", card->irq); 2473 pr_err("Unable to register interrupt %d\n", card->irq);
2485 pci_release_regions(pdev); 2474 err = -ENODEV;
2486 pci_disable_device(pdev); 2475 goto irq_fail;
2487 iounmap(card->ctlmem);
2488 iounmap(card->mem);
2489 kfree(card);
2490 return -ENODEV;
2491 } 2476 }
2492 2477
2493 /* Record info we need */ 2478 /* Record info we need */
@@ -2513,13 +2498,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2513 while (i--) 2498 while (i--)
2514 free_netdev(card->ports[i].dev); 2499 free_netdev(card->ports[i].dev);
2515 pr_err("FarSync: out of memory\n"); 2500 pr_err("FarSync: out of memory\n");
2516 free_irq(card->irq, card); 2501 err = -ENOMEM;
2517 pci_release_regions(pdev); 2502 goto hdlcdev_fail;
2518 pci_disable_device(pdev);
2519 iounmap(card->ctlmem);
2520 iounmap(card->mem);
2521 kfree(card);
2522 return -ENODEV;
2523 } 2503 }
2524 card->ports[i].dev = dev; 2504 card->ports[i].dev = dev;
2525 card->ports[i].card = card; 2505 card->ports[i].card = card;
@@ -2565,9 +2545,16 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2565 pci_set_drvdata(pdev, card); 2545 pci_set_drvdata(pdev, card);
2566 2546
2567 /* Remainder of card setup */ 2547 /* Remainder of card setup */
2548 if (no_of_cards_added >= FST_MAX_CARDS) {
2549 pr_err("FarSync: too many cards\n");
2550 err = -ENOMEM;
2551 goto card_array_fail;
2552 }
2568 fst_card_array[no_of_cards_added] = card; 2553 fst_card_array[no_of_cards_added] = card;
2569 card->card_no = no_of_cards_added++; /* Record instance and bump it */ 2554 card->card_no = no_of_cards_added++; /* Record instance and bump it */
2570 fst_init_card(card); 2555 err = fst_init_card(card);
2556 if (err)
2557 goto init_card_fail;
2571 if (card->family == FST_FAMILY_TXU) { 2558 if (card->family == FST_FAMILY_TXU) {
2572 /* 2559 /*
2573 * Allocate a dma buffer for transmit and receives 2560 * Allocate a dma buffer for transmit and receives
@@ -2577,29 +2564,46 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2577 &card->rx_dma_handle_card); 2564 &card->rx_dma_handle_card);
2578 if (card->rx_dma_handle_host == NULL) { 2565 if (card->rx_dma_handle_host == NULL) {
2579 pr_err("Could not allocate rx dma buffer\n"); 2566 pr_err("Could not allocate rx dma buffer\n");
2580 fst_disable_intr(card); 2567 err = -ENOMEM;
2581 pci_release_regions(pdev); 2568 goto rx_dma_fail;
2582 pci_disable_device(pdev);
2583 iounmap(card->ctlmem);
2584 iounmap(card->mem);
2585 kfree(card);
2586 return -ENOMEM;
2587 } 2569 }
2588 card->tx_dma_handle_host = 2570 card->tx_dma_handle_host =
2589 pci_alloc_consistent(card->device, FST_MAX_MTU, 2571 pci_alloc_consistent(card->device, FST_MAX_MTU,
2590 &card->tx_dma_handle_card); 2572 &card->tx_dma_handle_card);
2591 if (card->tx_dma_handle_host == NULL) { 2573 if (card->tx_dma_handle_host == NULL) {
2592 pr_err("Could not allocate tx dma buffer\n"); 2574 pr_err("Could not allocate tx dma buffer\n");
2593 fst_disable_intr(card); 2575 err = -ENOMEM;
2594 pci_release_regions(pdev); 2576 goto tx_dma_fail;
2595 pci_disable_device(pdev);
2596 iounmap(card->ctlmem);
2597 iounmap(card->mem);
2598 kfree(card);
2599 return -ENOMEM;
2600 } 2577 }
2601 } 2578 }
2602 return 0; /* Success */ 2579 return 0; /* Success */
2580
2581tx_dma_fail:
2582 pci_free_consistent(card->device, FST_MAX_MTU,
2583 card->rx_dma_handle_host,
2584 card->rx_dma_handle_card);
2585rx_dma_fail:
2586 fst_disable_intr(card);
2587 for (i = 0 ; i < card->nports ; i++)
2588 unregister_hdlc_device(card->ports[i].dev);
2589init_card_fail:
2590 fst_card_array[card->card_no] = NULL;
2591card_array_fail:
2592 for (i = 0 ; i < card->nports ; i++)
2593 free_netdev(card->ports[i].dev);
2594hdlcdev_fail:
2595 free_irq(card->irq, card);
2596irq_fail:
2597 iounmap(card->ctlmem);
2598ioremap_ctlmem_fail:
2599 iounmap(card->mem);
2600ioremap_physmem_fail:
2601 pci_release_regions(pdev);
2602regions_fail:
2603 pci_disable_device(pdev);
2604enable_fail:
2605 kfree(card);
2606 return err;
2603} 2607}
2604 2608
2605/* 2609/*
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 5895f1978691..fa9fdfa128c1 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
122{ 122{
123 struct x25_asy *sl = netdev_priv(dev); 123 struct x25_asy *sl = netdev_priv(dev);
124 unsigned char *xbuff, *rbuff; 124 unsigned char *xbuff, *rbuff;
125 int len = 2 * newmtu; 125 int len;
126 126
127 if (newmtu > 65534)
128 return -EINVAL;
129
130 len = 2 * newmtu;
127 xbuff = kmalloc(len + 4, GFP_ATOMIC); 131 xbuff = kmalloc(len + 4, GFP_ATOMIC);
128 rbuff = kmalloc(len + 4, GFP_ATOMIC); 132 rbuff = kmalloc(len + 4, GFP_ATOMIC);
129 133
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 82017f56e661..e6c56c5bb0f6 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -795,7 +795,11 @@ int ath10k_core_start(struct ath10k *ar)
795 if (status) 795 if (status)
796 goto err_htc_stop; 796 goto err_htc_stop;
797 797
798 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; 798 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
799 ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1;
800 else
801 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
802
799 INIT_LIST_HEAD(&ar->arvifs); 803 INIT_LIST_HEAD(&ar->arvifs);
800 804
801 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) 805 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 6c102b1312ff..eebc860c3655 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -312,7 +312,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
312 int msdu_len, msdu_chaining = 0; 312 int msdu_len, msdu_chaining = 0;
313 struct sk_buff *msdu; 313 struct sk_buff *msdu;
314 struct htt_rx_desc *rx_desc; 314 struct htt_rx_desc *rx_desc;
315 bool corrupted = false;
316 315
317 lockdep_assert_held(&htt->rx_ring.lock); 316 lockdep_assert_held(&htt->rx_ring.lock);
318 317
@@ -439,9 +438,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
439 last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & 438 last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
440 RX_MSDU_END_INFO0_LAST_MSDU; 439 RX_MSDU_END_INFO0_LAST_MSDU;
441 440
442 if (msdu_chaining && !last_msdu)
443 corrupted = true;
444
445 if (last_msdu) { 441 if (last_msdu) {
446 msdu->next = NULL; 442 msdu->next = NULL;
447 break; 443 break;
@@ -457,20 +453,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
457 msdu_chaining = -1; 453 msdu_chaining = -1;
458 454
459 /* 455 /*
460 * Apparently FW sometimes reports weird chained MSDU sequences with
461 * more than one rx descriptor. This seems like a bug but needs more
462 * analyzing. For the time being fix it by dropping such sequences to
463 * avoid blowing up the host system.
464 */
465 if (corrupted) {
466 ath10k_warn("failed to pop chained msdus, dropping\n");
467 ath10k_htt_rx_free_msdu_chain(*head_msdu);
468 *head_msdu = NULL;
469 *tail_msdu = NULL;
470 msdu_chaining = -EINVAL;
471 }
472
473 /*
474 * Don't refill the ring yet. 456 * Don't refill the ring yet.
475 * 457 *
476 * First, the elements popped here are still in use - it is not 458 * First, the elements popped here are still in use - it is not
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 66acb2cbd9df..7c28cb55610b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -887,6 +887,15 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
887 887
888 tx_info = IEEE80211_SKB_CB(skb); 888 tx_info = IEEE80211_SKB_CB(skb);
889 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT; 889 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
890
891 /*
892 * No aggregation session is running, but there may be frames
893 * from a previous session or a failed attempt in the queue.
894 * Send them out as normal data frames
895 */
896 if (!tid->active)
897 tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
898
890 if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) { 899 if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
891 bf->bf_state.bf_type = 0; 900 bf->bf_state.bf_type = 0;
892 return bf; 901 return bf;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 6db51a666f61..d06fcb05adf2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1184,8 +1184,6 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
1184 bus->bus_priv.usb = bus_pub; 1184 bus->bus_priv.usb = bus_pub;
1185 dev_set_drvdata(dev, bus); 1185 dev_set_drvdata(dev, bus);
1186 bus->ops = &brcmf_usb_bus_ops; 1186 bus->ops = &brcmf_usb_bus_ops;
1187 bus->chip = bus_pub->devid;
1188 bus->chiprev = bus_pub->chiprev;
1189 bus->proto_type = BRCMF_PROTO_BCDC; 1187 bus->proto_type = BRCMF_PROTO_BCDC;
1190 bus->always_use_fws_queue = true; 1188 bus->always_use_fws_queue = true;
1191 1189
@@ -1194,6 +1192,9 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
1194 if (ret) 1192 if (ret)
1195 goto fail; 1193 goto fail;
1196 } 1194 }
1195 bus->chip = bus_pub->devid;
1196 bus->chiprev = bus_pub->chiprev;
1197
1197 /* request firmware here */ 1198 /* request firmware here */
1198 brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL, 1199 brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
1199 brcmf_usb_probe_phase2); 1200 brcmf_usb_probe_phase2);
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index ed50de6362ed..6dc5dd3ced44 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1068,13 +1068,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1068 /* recalculate basic rates */ 1068 /* recalculate basic rates */
1069 iwl_calc_basic_rates(priv, ctx); 1069 iwl_calc_basic_rates(priv, ctx);
1070 1070
1071 /*
1072 * force CTS-to-self frames protection if RTS-CTS is not preferred
1073 * one aggregation protection method
1074 */
1075 if (!priv->hw_params.use_rts_for_aggregation)
1076 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1077
1078 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || 1071 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
1079 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) 1072 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
1080 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 1073 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -1480,11 +1473,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
1480 else 1473 else
1481 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; 1474 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
1482 1475
1483 if (bss_conf->use_cts_prot)
1484 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1485 else
1486 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
1487
1488 memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); 1476 memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
1489 1477
1490 if (vif->type == NL80211_IFTYPE_AP || 1478 if (vif->type == NL80211_IFTYPE_AP ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 0aa7c0085c9f..b1a33322b9ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -88,6 +88,7 @@
88 * P2P client interfaces simultaneously if they are in different bindings. 88 * P2P client interfaces simultaneously if they are in different bindings.
89 * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and 89 * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
90 * P2P client interfaces simultaneously if they are in same bindings. 90 * P2P client interfaces simultaneously if they are in same bindings.
91 * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
91 * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save 92 * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
92 * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering. 93 * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
93 * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients 94 * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 8b5302777632..8b79081d4885 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -667,10 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
667 if (vif->bss_conf.qos) 667 if (vif->bss_conf.qos)
668 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); 668 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
669 669
670 if (vif->bss_conf.use_cts_prot) { 670 if (vif->bss_conf.use_cts_prot)
671 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); 671 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
672 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN); 672
673 }
674 IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", 673 IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
675 vif->bss_conf.use_cts_prot, 674 vif->bss_conf.use_cts_prot,
676 vif->bss_conf.ht_operation_mode); 675 vif->bss_conf.ht_operation_mode);
@@ -1073,8 +1072,12 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
1073 /* Fill the common data for all mac context types */ 1072 /* Fill the common data for all mac context types */
1074 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); 1073 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
1075 1074
1076 /* Also enable probe requests to pass */ 1075 /*
1077 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); 1076 * pass probe requests and beacons from other APs (needed
1077 * for ht protection)
1078 */
1079 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
1080 MAC_FILTER_IN_BEACON);
1078 1081
1079 /* Fill the data specific for ap mode */ 1082 /* Fill the data specific for ap mode */
1080 iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap, 1083 iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
@@ -1095,6 +1098,13 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
1095 /* Fill the common data for all mac context types */ 1098 /* Fill the common data for all mac context types */
1096 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); 1099 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
1097 1100
1101 /*
1102 * pass probe requests and beacons from other APs (needed
1103 * for ht protection)
1104 */
1105 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
1106 MAC_FILTER_IN_BEACON);
1107
1098 /* Fill the data specific for GO mode */ 1108 /* Fill the data specific for GO mode */
1099 iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap, 1109 iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
1100 action == FW_CTXT_ACTION_ADD); 1110 action == FW_CTXT_ACTION_ADD);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 7215f5980186..98556d03c1ed 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1159,8 +1159,12 @@ static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
1159 1159
1160 bcast_mac = &cmd->macs[mvmvif->id]; 1160 bcast_mac = &cmd->macs[mvmvif->id];
1161 1161
1162 /* enable filtering only for associated stations */ 1162 /*
1163 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) 1163 * enable filtering only for associated stations, but not for P2P
1164 * Clients
1165 */
1166 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
1167 !vif->bss_conf.assoc)
1164 return; 1168 return;
1165 1169
1166 bcast_mac->default_discard = 1; 1170 bcast_mac->default_discard = 1;
@@ -1237,10 +1241,6 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
1237 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) 1241 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
1238 return 0; 1242 return 0;
1239 1243
1240 /* bcast filtering isn't supported for P2P client */
1241 if (vif->p2p)
1242 return 0;
1243
1244 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) 1244 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1245 return 0; 1245 return 0;
1246 1246
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 4b6c7d4bd199..eac2b424f6a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -588,9 +588,7 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
588 struct iwl_scan_offload_cmd *scan, 588 struct iwl_scan_offload_cmd *scan,
589 struct iwl_mvm_scan_params *params) 589 struct iwl_mvm_scan_params *params)
590{ 590{
591 scan->channel_count = 591 scan->channel_count = req->n_channels;
592 mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
593 mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
594 scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME); 592 scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
595 scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH); 593 scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
596 scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT; 594 scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
@@ -669,61 +667,37 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
669 struct cfg80211_sched_scan_request *req, 667 struct cfg80211_sched_scan_request *req,
670 struct iwl_scan_channel_cfg *channels, 668 struct iwl_scan_channel_cfg *channels,
671 enum ieee80211_band band, 669 enum ieee80211_band band,
672 int *head, int *tail, 670 int *head,
673 u32 ssid_bitmap, 671 u32 ssid_bitmap,
674 struct iwl_mvm_scan_params *params) 672 struct iwl_mvm_scan_params *params)
675{ 673{
676 struct ieee80211_supported_band *s_band; 674 int i, index = 0;
677 int n_channels = req->n_channels;
678 int i, j, index = 0;
679 bool partial;
680 675
681 /* 676 for (i = 0; i < req->n_channels; i++) {
682 * We have to configure all supported channels, even if we don't want to 677 struct ieee80211_channel *chan = req->channels[i];
683 * scan on them, but we have to send channels in the order that we want 678
684 * to scan. So add requested channels to head of the list and others to 679 if (chan->band != band)
685 * the end. 680 continue;
686 */ 681
687 s_band = &mvm->nvm_data->bands[band]; 682 index = *head;
688 683 (*head)++;
689 for (i = 0; i < s_band->n_channels && *head <= *tail; i++) { 684
690 partial = false; 685 channels->channel_number[index] = cpu_to_le16(chan->hw_value);
691 for (j = 0; j < n_channels; j++)
692 if (s_band->channels[i].center_freq ==
693 req->channels[j]->center_freq) {
694 index = *head;
695 (*head)++;
696 /*
697 * Channels that came with the request will be
698 * in partial scan .
699 */
700 partial = true;
701 break;
702 }
703 if (!partial) {
704 index = *tail;
705 (*tail)--;
706 }
707 channels->channel_number[index] =
708 cpu_to_le16(ieee80211_frequency_to_channel(
709 s_band->channels[i].center_freq));
710 channels->dwell_time[index][0] = params->dwell[band].active; 686 channels->dwell_time[index][0] = params->dwell[band].active;
711 channels->dwell_time[index][1] = params->dwell[band].passive; 687 channels->dwell_time[index][1] = params->dwell[band].passive;
712 688
713 channels->iter_count[index] = cpu_to_le16(1); 689 channels->iter_count[index] = cpu_to_le16(1);
714 channels->iter_interval[index] = 0; 690 channels->iter_interval[index] = 0;
715 691
716 if (!(s_band->channels[i].flags & IEEE80211_CHAN_NO_IR)) 692 if (!(chan->flags & IEEE80211_CHAN_NO_IR))
717 channels->type[index] |= 693 channels->type[index] |=
718 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE); 694 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
719 695
720 channels->type[index] |= 696 channels->type[index] |=
721 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL); 697 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
722 if (partial) 698 IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
723 channels->type[index] |=
724 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
725 699
726 if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40) 700 if (chan->flags & IEEE80211_CHAN_NO_HT40)
727 channels->type[index] |= 701 channels->type[index] |=
728 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW); 702 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
729 703
@@ -740,7 +714,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
740 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; 714 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
741 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; 715 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
742 int head = 0; 716 int head = 0;
743 int tail = band_2ghz + band_5ghz - 1;
744 u32 ssid_bitmap; 717 u32 ssid_bitmap;
745 int cmd_len; 718 int cmd_len;
746 int ret; 719 int ret;
@@ -772,7 +745,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
772 &scan_cfg->scan_cmd.tx_cmd[0], 745 &scan_cfg->scan_cmd.tx_cmd[0],
773 scan_cfg->data); 746 scan_cfg->data);
774 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg, 747 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
775 IEEE80211_BAND_2GHZ, &head, &tail, 748 IEEE80211_BAND_2GHZ, &head,
776 ssid_bitmap, &params); 749 ssid_bitmap, &params);
777 } 750 }
778 if (band_5ghz) { 751 if (band_5ghz) {
@@ -782,7 +755,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
782 scan_cfg->data + 755 scan_cfg->data +
783 SCAN_OFFLOAD_PROBE_REQ_SIZE); 756 SCAN_OFFLOAD_PROBE_REQ_SIZE);
784 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg, 757 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
785 IEEE80211_BAND_5GHZ, &head, &tail, 758 IEEE80211_BAND_5GHZ, &head,
786 ssid_bitmap, &params); 759 ssid_bitmap, &params);
787 } 760 }
788 761
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 7091a18d5a72..98950e45c7b0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -367,6 +367,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
367 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, 367 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
368 {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, 368 {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
369 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, 371 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, 372 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
372 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, 373 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
@@ -380,7 +381,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
380 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, 381 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
381 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, 382 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
382 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, 383 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
383 {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)}, 384 {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
384 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, 385 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
385 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, 386 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
386 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, 387 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 5b32106182f8..fe0f66f73507 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -185,6 +185,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
185 skb_reserve(skb_aggr, headroom + sizeof(struct txpd)); 185 skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
186 tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr); 186 tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
187 187
188 memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
188 tx_info_aggr->bss_type = tx_info_src->bss_type; 189 tx_info_aggr->bss_type = tx_info_src->bss_type;
189 tx_info_aggr->bss_num = tx_info_src->bss_num; 190 tx_info_aggr->bss_num = tx_info_src->bss_num;
190 191
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index e95dec91a561..b511613bba2d 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -220,6 +220,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
220 } 220 }
221 221
222 tx_info = MWIFIEX_SKB_TXCB(skb); 222 tx_info = MWIFIEX_SKB_TXCB(skb);
223 memset(tx_info, 0, sizeof(*tx_info));
223 tx_info->bss_num = priv->bss_num; 224 tx_info->bss_num = priv->bss_num;
224 tx_info->bss_type = priv->bss_type; 225 tx_info->bss_type = priv->bss_type;
225 tx_info->pkt_len = pkt_len; 226 tx_info->pkt_len = pkt_len;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 8dee6c86f4f1..c161141f6c39 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -453,6 +453,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
453 453
454 if (skb) { 454 if (skb) {
455 rx_info = MWIFIEX_SKB_RXCB(skb); 455 rx_info = MWIFIEX_SKB_RXCB(skb);
456 memset(rx_info, 0, sizeof(*rx_info));
456 rx_info->bss_num = priv->bss_num; 457 rx_info->bss_num = priv->bss_num;
457 rx_info->bss_type = priv->bss_type; 458 rx_info->bss_type = priv->bss_type;
458 } 459 }
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index cbabc12fbda3..e91cd0fa5ca8 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -645,6 +645,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
645 } 645 }
646 646
647 tx_info = MWIFIEX_SKB_TXCB(skb); 647 tx_info = MWIFIEX_SKB_TXCB(skb);
648 memset(tx_info, 0, sizeof(*tx_info));
648 tx_info->bss_num = priv->bss_num; 649 tx_info->bss_num = priv->bss_num;
649 tx_info->bss_type = priv->bss_type; 650 tx_info->bss_type = priv->bss_type;
650 tx_info->pkt_len = skb->len; 651 tx_info->pkt_len = skb->len;
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 5fce7e78a36e..70eb863c7249 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -150,6 +150,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
150 return -1; 150 return -1;
151 151
152 tx_info = MWIFIEX_SKB_TXCB(skb); 152 tx_info = MWIFIEX_SKB_TXCB(skb);
153 memset(tx_info, 0, sizeof(*tx_info));
153 tx_info->bss_num = priv->bss_num; 154 tx_info->bss_num = priv->bss_num;
154 tx_info->bss_type = priv->bss_type; 155 tx_info->bss_type = priv->bss_type;
155 tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN); 156 tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN);
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index e73034fbbde9..0e88364e0c67 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -605,6 +605,7 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
605 } 605 }
606 606
607 tx_info = MWIFIEX_SKB_TXCB(skb); 607 tx_info = MWIFIEX_SKB_TXCB(skb);
608 memset(tx_info, 0, sizeof(*tx_info));
608 tx_info->bss_num = priv->bss_num; 609 tx_info->bss_num = priv->bss_num;
609 tx_info->bss_type = priv->bss_type; 610 tx_info->bss_type = priv->bss_type;
610 611
@@ -760,6 +761,7 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
760 skb->priority = MWIFIEX_PRIO_VI; 761 skb->priority = MWIFIEX_PRIO_VI;
761 762
762 tx_info = MWIFIEX_SKB_TXCB(skb); 763 tx_info = MWIFIEX_SKB_TXCB(skb);
764 memset(tx_info, 0, sizeof(*tx_info));
763 tx_info->bss_num = priv->bss_num; 765 tx_info->bss_num = priv->bss_num;
764 tx_info->bss_type = priv->bss_type; 766 tx_info->bss_type = priv->bss_type;
765 tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT; 767 tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 37f26afd4314..fd7e5b9b4581 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -55,6 +55,7 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
55 return -1; 55 return -1;
56 } 56 }
57 57
58 memset(rx_info, 0, sizeof(*rx_info));
58 rx_info->bss_num = priv->bss_num; 59 rx_info->bss_num = priv->bss_num;
59 rx_info->bss_type = priv->bss_type; 60 rx_info->bss_type = priv->bss_type;
60 61
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 9a56bc61cb1d..b0601b91cc4f 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -175,6 +175,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
175 } 175 }
176 176
177 tx_info = MWIFIEX_SKB_TXCB(skb); 177 tx_info = MWIFIEX_SKB_TXCB(skb);
178 memset(tx_info, 0, sizeof(*tx_info));
178 tx_info->bss_num = priv->bss_num; 179 tx_info->bss_num = priv->bss_num;
179 tx_info->bss_type = priv->bss_type; 180 tx_info->bss_type = priv->bss_type;
180 tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT; 181 tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index e11dab2216c6..832006b5aab1 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -231,9 +231,12 @@ static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer)
231 */ 231 */
232static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev) 232static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
233{ 233{
234 __le32 reg; 234 __le32 *reg;
235 u32 fw_mode; 235 u32 fw_mode;
236 236
237 reg = kmalloc(sizeof(*reg), GFP_KERNEL);
238 if (reg == NULL)
239 return -ENOMEM;
237 /* cannot use rt2x00usb_register_read here as it uses different 240 /* cannot use rt2x00usb_register_read here as it uses different
238 * mode (MULTI_READ vs. DEVICE_MODE) and does not pass the 241 * mode (MULTI_READ vs. DEVICE_MODE) and does not pass the
239 * magic value USB_MODE_AUTORUN (0x11) to the device, thus the 242 * magic value USB_MODE_AUTORUN (0x11) to the device, thus the
@@ -241,8 +244,9 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
241 */ 244 */
242 rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE, 245 rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
243 USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN, 246 USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN,
244 &reg, sizeof(reg), REGISTER_TIMEOUT_FIRMWARE); 247 reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE);
245 fw_mode = le32_to_cpu(reg); 248 fw_mode = le32_to_cpu(*reg);
249 kfree(reg);
246 250
247 if ((fw_mode & 0x00000003) == 2) 251 if ((fw_mode & 0x00000003) == 2)
248 return 1; 252 return 1;
@@ -261,6 +265,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
261 int status; 265 int status;
262 u32 offset; 266 u32 offset;
263 u32 length; 267 u32 length;
268 int retval;
264 269
265 /* 270 /*
266 * Check which section of the firmware we need. 271 * Check which section of the firmware we need.
@@ -278,7 +283,10 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
278 /* 283 /*
279 * Write firmware to device. 284 * Write firmware to device.
280 */ 285 */
281 if (rt2800usb_autorun_detect(rt2x00dev)) { 286 retval = rt2800usb_autorun_detect(rt2x00dev);
287 if (retval < 0)
288 return retval;
289 if (retval) {
282 rt2x00_info(rt2x00dev, 290 rt2x00_info(rt2x00dev,
283 "Firmware loading not required - NIC in AutoRun mode\n"); 291 "Firmware loading not required - NIC in AutoRun mode\n");
284 } else { 292 } else {
@@ -763,7 +771,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
763 */ 771 */
764static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev) 772static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev)
765{ 773{
766 if (rt2800usb_autorun_detect(rt2x00dev)) 774 int retval;
775
776 retval = rt2800usb_autorun_detect(rt2x00dev);
777 if (retval < 0)
778 return retval;
779 if (retval)
767 return 1; 780 return 1;
768 return rt2800_efuse_detect(rt2x00dev); 781 return rt2800_efuse_detect(rt2x00dev);
769} 782}
@@ -772,7 +785,10 @@ static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
772{ 785{
773 int retval; 786 int retval;
774 787
775 if (rt2800usb_efuse_detect(rt2x00dev)) 788 retval = rt2800usb_efuse_detect(rt2x00dev);
789 if (retval < 0)
790 return retval;
791 if (retval)
776 retval = rt2800_read_eeprom_efuse(rt2x00dev); 792 retval = rt2800_read_eeprom_efuse(rt2x00dev);
777 else 793 else
778 retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, 794 retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1844a47636b6..c65b636bcab9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1030{ 1030{
1031 struct gnttab_map_grant_ref *gop_map = *gopp_map; 1031 struct gnttab_map_grant_ref *gop_map = *gopp_map;
1032 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; 1032 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1033 /* This always points to the shinfo of the skb being checked, which
1034 * could be either the first or the one on the frag_list
1035 */
1033 struct skb_shared_info *shinfo = skb_shinfo(skb); 1036 struct skb_shared_info *shinfo = skb_shinfo(skb);
1037 /* If this is non-NULL, we are currently checking the frag_list skb, and
1038 * this points to the shinfo of the first one
1039 */
1040 struct skb_shared_info *first_shinfo = NULL;
1034 int nr_frags = shinfo->nr_frags; 1041 int nr_frags = shinfo->nr_frags;
1042 const bool sharedslot = nr_frags &&
1043 frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
1035 int i, err; 1044 int i, err;
1036 struct sk_buff *first_skb = NULL;
1037 1045
1038 /* Check status of header. */ 1046 /* Check status of header. */
1039 err = (*gopp_copy)->status; 1047 err = (*gopp_copy)->status;
1040 (*gopp_copy)++;
1041 if (unlikely(err)) { 1048 if (unlikely(err)) {
1042 if (net_ratelimit()) 1049 if (net_ratelimit())
1043 netdev_dbg(queue->vif->dev, 1050 netdev_dbg(queue->vif->dev,
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1045 (*gopp_copy)->status, 1052 (*gopp_copy)->status,
1046 pending_idx, 1053 pending_idx,
1047 (*gopp_copy)->source.u.ref); 1054 (*gopp_copy)->source.u.ref);
1048 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); 1055 /* The first frag might still have this slot mapped */
1056 if (!sharedslot)
1057 xenvif_idx_release(queue, pending_idx,
1058 XEN_NETIF_RSP_ERROR);
1049 } 1059 }
1060 (*gopp_copy)++;
1050 1061
1051check_frags: 1062check_frags:
1052 for (i = 0; i < nr_frags; i++, gop_map++) { 1063 for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -1062,8 +1073,19 @@ check_frags:
1062 pending_idx, 1073 pending_idx,
1063 gop_map->handle); 1074 gop_map->handle);
1064 /* Had a previous error? Invalidate this fragment. */ 1075 /* Had a previous error? Invalidate this fragment. */
1065 if (unlikely(err)) 1076 if (unlikely(err)) {
1066 xenvif_idx_unmap(queue, pending_idx); 1077 xenvif_idx_unmap(queue, pending_idx);
1078 /* If the mapping of the first frag was OK, but
1079 * the header's copy failed, and they are
1080 * sharing a slot, send an error
1081 */
1082 if (i == 0 && sharedslot)
1083 xenvif_idx_release(queue, pending_idx,
1084 XEN_NETIF_RSP_ERROR);
1085 else
1086 xenvif_idx_release(queue, pending_idx,
1087 XEN_NETIF_RSP_OKAY);
1088 }
1067 continue; 1089 continue;
1068 } 1090 }
1069 1091
@@ -1075,42 +1097,53 @@ check_frags:
1075 gop_map->status, 1097 gop_map->status,
1076 pending_idx, 1098 pending_idx,
1077 gop_map->ref); 1099 gop_map->ref);
1100
1078 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); 1101 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1079 1102
1080 /* Not the first error? Preceding frags already invalidated. */ 1103 /* Not the first error? Preceding frags already invalidated. */
1081 if (err) 1104 if (err)
1082 continue; 1105 continue;
1083 /* First error: invalidate preceding fragments. */ 1106
1107 /* First error: if the header haven't shared a slot with the
1108 * first frag, release it as well.
1109 */
1110 if (!sharedslot)
1111 xenvif_idx_release(queue,
1112 XENVIF_TX_CB(skb)->pending_idx,
1113 XEN_NETIF_RSP_OKAY);
1114
1115 /* Invalidate preceding fragments of this skb. */
1084 for (j = 0; j < i; j++) { 1116 for (j = 0; j < i; j++) {
1085 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1117 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1086 xenvif_idx_unmap(queue, pending_idx); 1118 xenvif_idx_unmap(queue, pending_idx);
1119 xenvif_idx_release(queue, pending_idx,
1120 XEN_NETIF_RSP_OKAY);
1121 }
1122
1123 /* And if we found the error while checking the frag_list, unmap
1124 * the first skb's frags
1125 */
1126 if (first_shinfo) {
1127 for (j = 0; j < first_shinfo->nr_frags; j++) {
1128 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
1129 xenvif_idx_unmap(queue, pending_idx);
1130 xenvif_idx_release(queue, pending_idx,
1131 XEN_NETIF_RSP_OKAY);
1132 }
1087 } 1133 }
1088 1134
1089 /* Remember the error: invalidate all subsequent fragments. */ 1135 /* Remember the error: invalidate all subsequent fragments. */
1090 err = newerr; 1136 err = newerr;
1091 } 1137 }
1092 1138
1093 if (skb_has_frag_list(skb)) { 1139 if (skb_has_frag_list(skb) && !first_shinfo) {
1094 first_skb = skb; 1140 first_shinfo = skb_shinfo(skb);
1095 skb = shinfo->frag_list; 1141 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
1096 shinfo = skb_shinfo(skb);
1097 nr_frags = shinfo->nr_frags; 1142 nr_frags = shinfo->nr_frags;
1098 1143
1099 goto check_frags; 1144 goto check_frags;
1100 } 1145 }
1101 1146
1102 /* There was a mapping error in the frag_list skb. We have to unmap
1103 * the first skb's frags
1104 */
1105 if (first_skb && err) {
1106 int j;
1107 shinfo = skb_shinfo(first_skb);
1108 for (j = 0; j < shinfo->nr_frags; j++) {
1109 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1110 xenvif_idx_unmap(queue, pending_idx);
1111 }
1112 }
1113
1114 *gopp_map = gop_map; 1147 *gopp_map = gop_map;
1115 return err; 1148 return err;
1116} 1149}
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
1518 1551
1519 /* Check the remap error code. */ 1552 /* Check the remap error code. */
1520 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { 1553 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1554 /* If there was an error, xenvif_tx_check_gop is
1555 * expected to release all the frags which were mapped,
1556 * so kfree_skb shouldn't do it again
1557 */
1521 skb_shinfo(skb)->nr_frags = 0; 1558 skb_shinfo(skb)->nr_frags = 0;
1559 if (skb_has_frag_list(skb)) {
1560 struct sk_buff *nskb =
1561 skb_shinfo(skb)->frag_list;
1562 skb_shinfo(nskb)->nr_frags = 0;
1563 }
1522 kfree_skb(skb); 1564 kfree_skb(skb);
1523 continue; 1565 continue;
1524 } 1566 }
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1822 tx_unmap_op.status); 1864 tx_unmap_op.status);
1823 BUG(); 1865 BUG();
1824 } 1866 }
1825
1826 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
1827} 1867}
1828 1868
1829static inline int rx_work_todo(struct xenvif_queue *queue) 1869static inline int rx_work_todo(struct xenvif_queue *queue)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2ccb4a02368b..055222bae6e4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1439,16 +1439,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
1439 unsigned int i = 0; 1439 unsigned int i = 0;
1440 unsigned int num_queues = info->netdev->real_num_tx_queues; 1440 unsigned int num_queues = info->netdev->real_num_tx_queues;
1441 1441
1442 netif_carrier_off(info->netdev);
1443
1442 for (i = 0; i < num_queues; ++i) { 1444 for (i = 0; i < num_queues; ++i) {
1443 struct netfront_queue *queue = &info->queues[i]; 1445 struct netfront_queue *queue = &info->queues[i];
1444 1446
1445 /* Stop old i/f to prevent errors whilst we rebuild the state. */
1446 spin_lock_bh(&queue->rx_lock);
1447 spin_lock_irq(&queue->tx_lock);
1448 netif_carrier_off(queue->info->netdev);
1449 spin_unlock_irq(&queue->tx_lock);
1450 spin_unlock_bh(&queue->rx_lock);
1451
1452 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) 1447 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1453 unbind_from_irqhandler(queue->tx_irq, queue); 1448 unbind_from_irqhandler(queue->tx_irq, queue);
1454 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { 1449 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1458,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
1458 queue->tx_evtchn = queue->rx_evtchn = 0; 1453 queue->tx_evtchn = queue->rx_evtchn = 0;
1459 queue->tx_irq = queue->rx_irq = 0; 1454 queue->tx_irq = queue->rx_irq = 0;
1460 1455
1456 napi_synchronize(&queue->napi);
1457
1461 /* End access and free the pages */ 1458 /* End access and free the pages */
1462 xennet_end_access(queue->tx_ring_ref, queue->tx.sring); 1459 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1463 xennet_end_access(queue->rx_ring_ref, queue->rx.sring); 1460 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -2046,13 +2043,15 @@ static int xennet_connect(struct net_device *dev)
2046 /* By now, the queue structures have been set up */ 2043 /* By now, the queue structures have been set up */
2047 for (j = 0; j < num_queues; ++j) { 2044 for (j = 0; j < num_queues; ++j) {
2048 queue = &np->queues[j]; 2045 queue = &np->queues[j];
2049 spin_lock_bh(&queue->rx_lock);
2050 spin_lock_irq(&queue->tx_lock);
2051 2046
2052 /* Step 1: Discard all pending TX packet fragments. */ 2047 /* Step 1: Discard all pending TX packet fragments. */
2048 spin_lock_irq(&queue->tx_lock);
2053 xennet_release_tx_bufs(queue); 2049 xennet_release_tx_bufs(queue);
2050 spin_unlock_irq(&queue->tx_lock);
2054 2051
2055 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ 2052 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
2053 spin_lock_bh(&queue->rx_lock);
2054
2056 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { 2055 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
2057 skb_frag_t *frag; 2056 skb_frag_t *frag;
2058 const struct page *page; 2057 const struct page *page;
@@ -2076,6 +2075,8 @@ static int xennet_connect(struct net_device *dev)
2076 } 2075 }
2077 2076
2078 queue->rx.req_prod_pvt = requeue_idx; 2077 queue->rx.req_prod_pvt = requeue_idx;
2078
2079 spin_unlock_bh(&queue->rx_lock);
2079 } 2080 }
2080 2081
2081 /* 2082 /*
@@ -2087,13 +2088,17 @@ static int xennet_connect(struct net_device *dev)
2087 netif_carrier_on(np->netdev); 2088 netif_carrier_on(np->netdev);
2088 for (j = 0; j < num_queues; ++j) { 2089 for (j = 0; j < num_queues; ++j) {
2089 queue = &np->queues[j]; 2090 queue = &np->queues[j];
2091
2090 notify_remote_via_irq(queue->tx_irq); 2092 notify_remote_via_irq(queue->tx_irq);
2091 if (queue->tx_irq != queue->rx_irq) 2093 if (queue->tx_irq != queue->rx_irq)
2092 notify_remote_via_irq(queue->rx_irq); 2094 notify_remote_via_irq(queue->rx_irq);
2093 xennet_tx_buf_gc(queue);
2094 xennet_alloc_rx_buffers(queue);
2095 2095
2096 spin_lock_irq(&queue->tx_lock);
2097 xennet_tx_buf_gc(queue);
2096 spin_unlock_irq(&queue->tx_lock); 2098 spin_unlock_irq(&queue->tx_lock);
2099
2100 spin_lock_bh(&queue->rx_lock);
2101 xennet_alloc_rx_buffers(queue);
2097 spin_unlock_bh(&queue->rx_lock); 2102 spin_unlock_bh(&queue->rx_lock);
2098 } 2103 }
2099 2104
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index b777d8f46bd5..9aa012e6ea0a 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -26,6 +26,54 @@
26#include <asm/setup.h> /* for COMMAND_LINE_SIZE */ 26#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
27#include <asm/page.h> 27#include <asm/page.h>
28 28
29/*
30 * of_fdt_limit_memory - limit the number of regions in the /memory node
31 * @limit: maximum entries
32 *
33 * Adjust the flattened device tree to have at most 'limit' number of
34 * memory entries in the /memory node. This function may be called
35 * any time after initial_boot_param is set.
36 */
37void of_fdt_limit_memory(int limit)
38{
39 int memory;
40 int len;
41 const void *val;
42 int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
43 int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
44 const uint32_t *addr_prop;
45 const uint32_t *size_prop;
46 int root_offset;
47 int cell_size;
48
49 root_offset = fdt_path_offset(initial_boot_params, "/");
50 if (root_offset < 0)
51 return;
52
53 addr_prop = fdt_getprop(initial_boot_params, root_offset,
54 "#address-cells", NULL);
55 if (addr_prop)
56 nr_address_cells = fdt32_to_cpu(*addr_prop);
57
58 size_prop = fdt_getprop(initial_boot_params, root_offset,
59 "#size-cells", NULL);
60 if (size_prop)
61 nr_size_cells = fdt32_to_cpu(*size_prop);
62
63 cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
64
65 memory = fdt_path_offset(initial_boot_params, "/memory");
66 if (memory > 0) {
67 val = fdt_getprop(initial_boot_params, memory, "reg", &len);
68 if (len > limit*cell_size) {
69 len = limit*cell_size;
70 pr_debug("Limiting number of entries to %d\n", limit);
71 fdt_setprop(initial_boot_params, memory, "reg", val,
72 len);
73 }
74 }
75}
76
29/** 77/**
30 * of_fdt_is_compatible - Return true if given node from the given blob has 78 * of_fdt_is_compatible - Return true if given node from the given blob has
31 * compat in its compatible list 79 * compat in its compatible list
@@ -937,7 +985,7 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
937} 985}
938#endif 986#endif
939 987
940bool __init early_init_dt_scan(void *params) 988bool __init early_init_dt_verify(void *params)
941{ 989{
942 if (!params) 990 if (!params)
943 return false; 991 return false;
@@ -951,6 +999,12 @@ bool __init early_init_dt_scan(void *params)
951 return false; 999 return false;
952 } 1000 }
953 1001
1002 return true;
1003}
1004
1005
1006void __init early_init_dt_scan_nodes(void)
1007{
954 /* Retrieve various information from the /chosen node */ 1008 /* Retrieve various information from the /chosen node */
955 of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line); 1009 of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
956 1010
@@ -959,7 +1013,17 @@ bool __init early_init_dt_scan(void *params)
959 1013
960 /* Setup memory, calling early_init_dt_add_memory_arch */ 1014 /* Setup memory, calling early_init_dt_add_memory_arch */
961 of_scan_flat_dt(early_init_dt_scan_memory, NULL); 1015 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
1016}
1017
1018bool __init early_init_dt_scan(void *params)
1019{
1020 bool status;
1021
1022 status = early_init_dt_verify(params);
1023 if (!status)
1024 return false;
962 1025
1026 early_init_dt_scan_nodes();
963 return true; 1027 return true;
964} 1028}
965 1029
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index a3bf2122a8d5..401b2453da45 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -182,40 +182,6 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
182} 182}
183EXPORT_SYMBOL(of_mdiobus_register); 183EXPORT_SYMBOL(of_mdiobus_register);
184 184
185/**
186 * of_mdiobus_link_phydev - Find a device node for a phy
187 * @mdio: pointer to mii_bus structure
188 * @phydev: phydev for which the of_node pointer should be set
189 *
190 * Walk the list of subnodes of a mdio bus and look for a node that matches the
191 * phy's address with its 'reg' property. If found, set the of_node pointer for
192 * the phy. This allows auto-probed pyh devices to be supplied with information
193 * passed in via DT.
194 */
195void of_mdiobus_link_phydev(struct mii_bus *mdio,
196 struct phy_device *phydev)
197{
198 struct device *dev = &phydev->dev;
199 struct device_node *child;
200
201 if (dev->of_node || !mdio->dev.of_node)
202 return;
203
204 for_each_available_child_of_node(mdio->dev.of_node, child) {
205 int addr;
206
207 addr = of_mdio_parse_addr(&mdio->dev, child);
208 if (addr < 0)
209 continue;
210
211 if (addr == phydev->addr) {
212 dev->of_node = child;
213 return;
214 }
215 }
216}
217EXPORT_SYMBOL(of_mdiobus_link_phydev);
218
219/* Helper function for of_phy_find_device */ 185/* Helper function for of_phy_find_device */
220static int of_phy_match(struct device *dev, void *phy_np) 186static int of_phy_match(struct device *dev, void *phy_np)
221{ 187{
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 2872ece81f35..44333bd8f908 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -5,6 +5,12 @@
5# Parport configuration. 5# Parport configuration.
6# 6#
7 7
8config ARCH_MIGHT_HAVE_PC_PARPORT
9 bool
10 help
11 Select this config option from the architecture Kconfig if
12 the architecture might have PC parallel port hardware.
13
8menuconfig PARPORT 14menuconfig PARPORT
9 tristate "Parallel port support" 15 tristate "Parallel port support"
10 depends on HAS_IOMEM 16 depends on HAS_IOMEM
@@ -31,12 +37,6 @@ menuconfig PARPORT
31 37
32 If unsure, say Y. 38 If unsure, say Y.
33 39
34config ARCH_MIGHT_HAVE_PC_PARPORT
35 bool
36 help
37 Select this config option from the architecture Kconfig if
38 the architecture might have PC parallel port hardware.
39
40if PARPORT 40if PARPORT
41 41
42config PARPORT_PC 42config PARPORT_PC
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 1bd6363bc95e..9f43916637ca 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1431,7 +1431,7 @@ static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
1431 1431
1432 status = readl(info->irqmux_base); 1432 status = readl(info->irqmux_base);
1433 1433
1434 for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK) 1434 for_each_set_bit(n, &status, info->nbanks)
1435 __gpio_irq_handler(&info->banks[n]); 1435 __gpio_irq_handler(&info->banks[n]);
1436 1436
1437 chained_irq_exit(chip, desc); 1437 chained_irq_exit(chip, desc);
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index b81448b2c75d..a5c6cb773e5f 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -319,8 +319,7 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
319 struct pnp_dev *pnp = _pnp; 319 struct pnp_dev *pnp = _pnp;
320 320
321 /* true means it matched */ 321 /* true means it matched */
322 return !acpi->physical_node_count 322 return pnp->data == acpi;
323 && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
324} 323}
325 324
326static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev) 325static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev)
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 9b60b1f3261c..44341dc5b148 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
287 "desc %p not ACKed\n", tx_desc); 287 "desc %p not ACKed\n", tx_desc);
288 } 288 }
289 289
290 if (ret == NULL) {
291 dev_dbg(bdma_chan->dchan.device->dev,
292 "%s: unable to obtain tx descriptor\n", __func__);
293 goto err_out;
294 }
295
290 i = bdma_chan->wr_count_next % bdma_chan->bd_num; 296 i = bdma_chan->wr_count_next % bdma_chan->bd_num;
291 if (i == bdma_chan->bd_num - 1) { 297 if (i == bdma_chan->bd_num - 1) {
292 i = 0; 298 i = 0;
@@ -297,7 +303,7 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
297 tx_desc->txd.phys = bdma_chan->bd_phys + 303 tx_desc->txd.phys = bdma_chan->bd_phys +
298 i * sizeof(struct tsi721_dma_desc); 304 i * sizeof(struct tsi721_dma_desc);
299 tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i]; 305 tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
300 306err_out:
301 spin_unlock_bh(&bdma_chan->lock); 307 spin_unlock_bh(&bdma_chan->lock);
302 308
303 return ret; 309 return ret;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 15b3459f8656..220acb4cbee5 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -633,7 +633,6 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
633 } else 633 } else
634 raw3270_writesf_readpart(rp); 634 raw3270_writesf_readpart(rp);
635 memset(&rp->init_reset, 0, sizeof(rp->init_reset)); 635 memset(&rp->init_reset, 0, sizeof(rp->init_reset));
636 memset(&rp->init_data, 0, sizeof(rp->init_data));
637} 636}
638 637
639static int 638static int
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 69ef4f8cfac8..4038437ff033 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -901,10 +901,15 @@ static int ap_device_probe(struct device *dev)
901 int rc; 901 int rc;
902 902
903 ap_dev->drv = ap_drv; 903 ap_dev->drv = ap_drv;
904
905 spin_lock_bh(&ap_device_list_lock);
906 list_add(&ap_dev->list, &ap_device_list);
907 spin_unlock_bh(&ap_device_list_lock);
908
904 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; 909 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
905 if (!rc) { 910 if (rc) {
906 spin_lock_bh(&ap_device_list_lock); 911 spin_lock_bh(&ap_device_list_lock);
907 list_add(&ap_dev->list, &ap_device_list); 912 list_del_init(&ap_dev->list);
908 spin_unlock_bh(&ap_device_list_lock); 913 spin_unlock_bh(&ap_device_list_lock);
909 } 914 }
910 return rc; 915 return rc;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f7e316368c99..3f50dfcb3227 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -733,6 +733,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
733 scsi_next_command(cmd); 733 scsi_next_command(cmd);
734 return; 734 return;
735 } 735 }
736 } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
737 /*
738 * Certain non BLOCK_PC requests are commands that don't
739 * actually transfer anything (FLUSH), so cannot use
740 * good_bytes != blk_rq_bytes(req) as the signal for an error.
741 * This sets the error explicitly for the problem case.
742 */
743 error = __scsi_error_from_host_byte(cmd, result);
736 } 744 }
737 745
738 /* no bidi support for !REQ_TYPE_BLOCK_PC yet */ 746 /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index 78b0fba7047e..8afc6fee40c5 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -1,6 +1,6 @@
1config VIDEO_OMAP4 1config VIDEO_OMAP4
2 bool "OMAP 4 Camera support" 2 bool "OMAP 4 Camera support"
3 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4 3 depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
4 select VIDEOBUF2_DMA_CONTIG 4 select VIDEOBUF2_DMA_CONTIG
5 ---help--- 5 ---help---
6 Driver for an OMAP 4 ISS controller. 6 Driver for an OMAP 4 ISS controller.
diff --git a/drivers/staging/rtl8723au/os_dep/usb_intf.c b/drivers/staging/rtl8723au/os_dep/usb_intf.c
index 8b25c1aa2025..ebb19b22f47f 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_intf.c
@@ -530,8 +530,10 @@ int rtw_resume_process23a(struct rtw_adapter *padapter)
530 pwrpriv->bkeepfwalive = false; 530 pwrpriv->bkeepfwalive = false;
531 531
532 DBG_8723A("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive); 532 DBG_8723A("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
533 if (pm_netdev_open23a(pnetdev, true) != 0) 533 if (pm_netdev_open23a(pnetdev, true) != 0) {
534 up(&pwrpriv->lock);
534 goto exit; 535 goto exit;
536 }
535 537
536 netif_device_attach(pnetdev); 538 netif_device_attach(pnetdev);
537 netif_carrier_on(pnetdev); 539 netif_carrier_on(pnetdev);
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
index 59679cd46816..69b80e80b011 100644
--- a/drivers/staging/vt6655/bssdb.c
+++ b/drivers/staging/vt6655/bssdb.c
@@ -981,7 +981,7 @@ start:
981 pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1)); 981 pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
982 } 982 }
983 983
984 { 984 if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) {
985 pDevice->byReAssocCount++; 985 pDevice->byReAssocCount++;
986 /* 10 sec timeout */ 986 /* 10 sec timeout */
987 if ((pDevice->byReAssocCount > 10) && (!pDevice->bLinkPass)) { 987 if ((pDevice->byReAssocCount > 10) && (!pDevice->bLinkPass)) {
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 1d3908d044d0..5a5fd937a442 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -2318,6 +2318,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
2318 int handled = 0; 2318 int handled = 0;
2319 unsigned char byData = 0; 2319 unsigned char byData = 0;
2320 int ii = 0; 2320 int ii = 0;
2321 unsigned long flags;
2321 2322
2322 MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr); 2323 MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
2323 2324
@@ -2331,7 +2332,8 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
2331 2332
2332 handled = 1; 2333 handled = 1;
2333 MACvIntDisable(pDevice->PortOffset); 2334 MACvIntDisable(pDevice->PortOffset);
2334 spin_lock_irq(&pDevice->lock); 2335
2336 spin_lock_irqsave(&pDevice->lock, flags);
2335 2337
2336 //Make sure current page is 0 2338 //Make sure current page is 0
2337 VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel); 2339 VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel);
@@ -2560,7 +2562,8 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
2560 if (byOrgPageSel == 1) 2562 if (byOrgPageSel == 1)
2561 MACvSelectPage1(pDevice->PortOffset); 2563 MACvSelectPage1(pDevice->PortOffset);
2562 2564
2563 spin_unlock_irq(&pDevice->lock); 2565 spin_unlock_irqrestore(&pDevice->lock, flags);
2566
2564 MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE); 2567 MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
2565 2568
2566 return IRQ_RETVAL(handled); 2569 return IRQ_RETVAL(handled);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 9d2b673f90e3..b8125aa64ad8 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1169,8 +1169,8 @@ static int ep_enable(struct usb_ep *ep,
1169 1169
1170 if (hwep->type == USB_ENDPOINT_XFER_CONTROL) 1170 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1171 cap |= QH_IOS; 1171 cap |= QH_IOS;
1172 if (hwep->num) 1172
1173 cap |= QH_ZLT; 1173 cap |= QH_ZLT;
1174 cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT; 1174 cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1175 /* 1175 /*
1176 * For ISO-TX, we set mult at QH as the largest value, and use 1176 * For ISO-TX, we set mult at QH as the largest value, and use
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 21b99b4b4082..0e950ad8cb25 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -889,6 +889,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
889 if (!hub_is_superspeed(hub->hdev)) 889 if (!hub_is_superspeed(hub->hdev))
890 return -EINVAL; 890 return -EINVAL;
891 891
892 ret = hub_port_status(hub, port1, &portstatus, &portchange);
893 if (ret < 0)
894 return ret;
895
896 /*
897 * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
898 * Controller [1022:7814] will have spurious result making the following
899 * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
900 * as high-speed device if we set the usb 3.0 port link state to
901 * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
902 * check the state here to avoid the bug.
903 */
904 if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
905 USB_SS_PORT_LS_RX_DETECT) {
906 dev_dbg(&hub->ports[port1 - 1]->dev,
907 "Not disabling port; link state is RxDetect\n");
908 return ret;
909 }
910
892 ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); 911 ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
893 if (ret) 912 if (ret)
894 return ret; 913 return ret;
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index b7a506f2bb14..5c660c77f03b 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -426,20 +426,18 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
426 * p2m are consistent. 426 * p2m are consistent.
427 */ 427 */
428 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 428 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
429 unsigned long p;
430 struct page *scratch_page = get_balloon_scratch_page();
431
432 if (!PageHighMem(page)) { 429 if (!PageHighMem(page)) {
430 struct page *scratch_page = get_balloon_scratch_page();
431
433 ret = HYPERVISOR_update_va_mapping( 432 ret = HYPERVISOR_update_va_mapping(
434 (unsigned long)__va(pfn << PAGE_SHIFT), 433 (unsigned long)__va(pfn << PAGE_SHIFT),
435 pfn_pte(page_to_pfn(scratch_page), 434 pfn_pte(page_to_pfn(scratch_page),
436 PAGE_KERNEL_RO), 0); 435 PAGE_KERNEL_RO), 0);
437 BUG_ON(ret); 436 BUG_ON(ret);
438 }
439 p = page_to_pfn(scratch_page);
440 __set_phys_to_machine(pfn, pfn_to_mfn(p));
441 437
442 put_balloon_scratch_page(); 438 put_balloon_scratch_page();
439 }
440 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
443 } 441 }
444#endif 442#endif
445 443
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 5d4de88fe5b8..eeba7544f0cd 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1195,18 +1195,20 @@ static int gnttab_expand(unsigned int req_entries)
1195int gnttab_init(void) 1195int gnttab_init(void)
1196{ 1196{
1197 int i; 1197 int i;
1198 unsigned long max_nr_grant_frames;
1198 unsigned int max_nr_glist_frames, nr_glist_frames; 1199 unsigned int max_nr_glist_frames, nr_glist_frames;
1199 unsigned int nr_init_grefs; 1200 unsigned int nr_init_grefs;
1200 int ret; 1201 int ret;
1201 1202
1202 gnttab_request_version(); 1203 gnttab_request_version();
1204 max_nr_grant_frames = gnttab_max_grant_frames();
1203 nr_grant_frames = 1; 1205 nr_grant_frames = 1;
1204 1206
1205 /* Determine the maximum number of frames required for the 1207 /* Determine the maximum number of frames required for the
1206 * grant reference free list on the current hypervisor. 1208 * grant reference free list on the current hypervisor.
1207 */ 1209 */
1208 BUG_ON(grefs_per_grant_frame == 0); 1210 BUG_ON(grefs_per_grant_frame == 0);
1209 max_nr_glist_frames = (gnttab_max_grant_frames() * 1211 max_nr_glist_frames = (max_nr_grant_frames *
1210 grefs_per_grant_frame / RPP); 1212 grefs_per_grant_frame / RPP);
1211 1213
1212 gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), 1214 gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
@@ -1223,6 +1225,11 @@ int gnttab_init(void)
1223 } 1225 }
1224 } 1226 }
1225 1227
1228 ret = arch_gnttab_init(max_nr_grant_frames,
1229 nr_status_frames(max_nr_grant_frames));
1230 if (ret < 0)
1231 goto ini_nomem;
1232
1226 if (gnttab_setup() < 0) { 1233 if (gnttab_setup() < 0) {
1227 ret = -ENODEV; 1234 ret = -ENODEV;
1228 goto ini_nomem; 1235 goto ini_nomem;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c3667b202f2f..5f1e1f3cd186 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -88,7 +88,6 @@ static int xen_suspend(void *data)
88 88
89 if (!si->cancelled) { 89 if (!si->cancelled) {
90 xen_irq_resume(); 90 xen_irq_resume();
91 xen_console_resume();
92 xen_timer_resume(); 91 xen_timer_resume();
93 } 92 }
94 93
@@ -135,6 +134,10 @@ static void do_suspend(void)
135 134
136 err = stop_machine(xen_suspend, &si, cpumask_of(0)); 135 err = stop_machine(xen_suspend, &si, cpumask_of(0));
137 136
137 /* Resume console as early as possible. */
138 if (!si.cancelled)
139 xen_console_resume();
140
138 raw_notifier_call_chain(&xen_resume_notifier, 0, NULL); 141 raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
139 142
140 dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE); 143 dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 42dd2e499ed8..35de0c04729f 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -55,13 +55,13 @@ static int __init afs_get_client_UUID(void)
55 afs_uuid.time_low = uuidtime; 55 afs_uuid.time_low = uuidtime;
56 afs_uuid.time_mid = uuidtime >> 32; 56 afs_uuid.time_mid = uuidtime >> 32;
57 afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK; 57 afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK;
58 afs_uuid.time_hi_and_version = AFS_UUID_VERSION_TIME; 58 afs_uuid.time_hi_and_version |= AFS_UUID_VERSION_TIME;
59 59
60 get_random_bytes(&clockseq, 2); 60 get_random_bytes(&clockseq, 2);
61 afs_uuid.clock_seq_low = clockseq; 61 afs_uuid.clock_seq_low = clockseq;
62 afs_uuid.clock_seq_hi_and_reserved = 62 afs_uuid.clock_seq_hi_and_reserved =
63 (clockseq >> 8) & AFS_UUID_CLOCKHI_MASK; 63 (clockseq >> 8) & AFS_UUID_CLOCKHI_MASK;
64 afs_uuid.clock_seq_hi_and_reserved = AFS_UUID_VARIANT_STD; 64 afs_uuid.clock_seq_hi_and_reserved |= AFS_UUID_VARIANT_STD;
65 65
66 _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", 66 _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
67 afs_uuid.time_low, 67 afs_uuid.time_low,
diff --git a/fs/aio.c b/fs/aio.c
index 955947ef3e02..1c9c5f0a9e2b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
830static void put_reqs_available(struct kioctx *ctx, unsigned nr) 830static void put_reqs_available(struct kioctx *ctx, unsigned nr)
831{ 831{
832 struct kioctx_cpu *kcpu; 832 struct kioctx_cpu *kcpu;
833 unsigned long flags;
833 834
834 preempt_disable(); 835 preempt_disable();
835 kcpu = this_cpu_ptr(ctx->cpu); 836 kcpu = this_cpu_ptr(ctx->cpu);
836 837
838 local_irq_save(flags);
837 kcpu->reqs_available += nr; 839 kcpu->reqs_available += nr;
840
838 while (kcpu->reqs_available >= ctx->req_batch * 2) { 841 while (kcpu->reqs_available >= ctx->req_batch * 2) {
839 kcpu->reqs_available -= ctx->req_batch; 842 kcpu->reqs_available -= ctx->req_batch;
840 atomic_add(ctx->req_batch, &ctx->reqs_available); 843 atomic_add(ctx->req_batch, &ctx->reqs_available);
841 } 844 }
842 845
846 local_irq_restore(flags);
843 preempt_enable(); 847 preempt_enable();
844} 848}
845 849
@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
847{ 851{
848 struct kioctx_cpu *kcpu; 852 struct kioctx_cpu *kcpu;
849 bool ret = false; 853 bool ret = false;
854 unsigned long flags;
850 855
851 preempt_disable(); 856 preempt_disable();
852 kcpu = this_cpu_ptr(ctx->cpu); 857 kcpu = this_cpu_ptr(ctx->cpu);
853 858
859 local_irq_save(flags);
854 if (!kcpu->reqs_available) { 860 if (!kcpu->reqs_available) {
855 int old, avail = atomic_read(&ctx->reqs_available); 861 int old, avail = atomic_read(&ctx->reqs_available);
856 862
@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
869 ret = true; 875 ret = true;
870 kcpu->reqs_available--; 876 kcpu->reqs_available--;
871out: 877out:
878 local_irq_restore(flags);
872 preempt_enable(); 879 preempt_enable();
873 return ret; 880 return ret;
874} 881}
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index e12441c7cf1d..7187b14faa6c 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -484,8 +484,19 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
484 log_list); 484 log_list);
485 list_del_init(&ordered->log_list); 485 list_del_init(&ordered->log_list);
486 spin_unlock_irq(&log->log_extents_lock[index]); 486 spin_unlock_irq(&log->log_extents_lock[index]);
487
488 if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
489 !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
490 struct inode *inode = ordered->inode;
491 u64 start = ordered->file_offset;
492 u64 end = ordered->file_offset + ordered->len - 1;
493
494 WARN_ON(!inode);
495 filemap_fdatawrite_range(inode->i_mapping, start, end);
496 }
487 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE, 497 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
488 &ordered->flags)); 498 &ordered->flags));
499
489 btrfs_put_ordered_extent(ordered); 500 btrfs_put_ordered_extent(ordered);
490 spin_lock_irq(&log->log_extents_lock[index]); 501 spin_lock_irq(&log->log_extents_lock[index]);
491 } 502 }
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6104676857f5..6cb82f62cb7c 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1680,11 +1680,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1680 if (device->bdev == root->fs_info->fs_devices->latest_bdev) 1680 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1681 root->fs_info->fs_devices->latest_bdev = next_device->bdev; 1681 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1682 1682
1683 if (device->bdev) 1683 if (device->bdev) {
1684 device->fs_devices->open_devices--; 1684 device->fs_devices->open_devices--;
1685 1685 /* remove sysfs entry */
1686 /* remove sysfs entry */ 1686 btrfs_kobj_rm_device(root->fs_info, device);
1687 btrfs_kobj_rm_device(root->fs_info, device); 1687 }
1688 1688
1689 call_rcu(&device->rcu, free_device); 1689 call_rcu(&device->rcu, free_device);
1690 1690
diff --git a/fs/coredump.c b/fs/coredump.c
index 0b2528fb640e..a93f7e6ea4cf 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -306,7 +306,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
306 if (unlikely(nr < 0)) 306 if (unlikely(nr < 0))
307 return nr; 307 return nr;
308 308
309 tsk->flags = PF_DUMPCORE; 309 tsk->flags |= PF_DUMPCORE;
310 if (atomic_read(&mm->mm_users) == nr + 1) 310 if (atomic_read(&mm->mm_users) == nr + 1)
311 goto done; 311 goto done;
312 /* 312 /*
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 98040ba388ac..17e39b047de5 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -71,7 +71,6 @@ struct dio_submit {
71 been performed at the start of a 71 been performed at the start of a
72 write */ 72 write */
73 int pages_in_io; /* approximate total IO pages */ 73 int pages_in_io; /* approximate total IO pages */
74 size_t size; /* total request size (doesn't change)*/
75 sector_t block_in_file; /* Current offset into the underlying 74 sector_t block_in_file; /* Current offset into the underlying
76 file in dio_block units. */ 75 file in dio_block units. */
77 unsigned blocks_available; /* At block_in_file. changes */ 76 unsigned blocks_available; /* At block_in_file. changes */
@@ -198,9 +197,8 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
198 * L1 cache. 197 * L1 cache.
199 */ 198 */
200static inline struct page *dio_get_page(struct dio *dio, 199static inline struct page *dio_get_page(struct dio *dio,
201 struct dio_submit *sdio, size_t *from, size_t *to) 200 struct dio_submit *sdio)
202{ 201{
203 int n;
204 if (dio_pages_present(sdio) == 0) { 202 if (dio_pages_present(sdio) == 0) {
205 int ret; 203 int ret;
206 204
@@ -209,10 +207,7 @@ static inline struct page *dio_get_page(struct dio *dio,
209 return ERR_PTR(ret); 207 return ERR_PTR(ret);
210 BUG_ON(dio_pages_present(sdio) == 0); 208 BUG_ON(dio_pages_present(sdio) == 0);
211 } 209 }
212 n = sdio->head++; 210 return dio->pages[sdio->head];
213 *from = n ? 0 : sdio->from;
214 *to = (n == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
215 return dio->pages[n];
216} 211}
217 212
218/** 213/**
@@ -911,11 +906,15 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
911 while (sdio->block_in_file < sdio->final_block_in_request) { 906 while (sdio->block_in_file < sdio->final_block_in_request) {
912 struct page *page; 907 struct page *page;
913 size_t from, to; 908 size_t from, to;
914 page = dio_get_page(dio, sdio, &from, &to); 909
910 page = dio_get_page(dio, sdio);
915 if (IS_ERR(page)) { 911 if (IS_ERR(page)) {
916 ret = PTR_ERR(page); 912 ret = PTR_ERR(page);
917 goto out; 913 goto out;
918 } 914 }
915 from = sdio->head ? 0 : sdio->from;
916 to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
917 sdio->head++;
919 918
920 while (from < to) { 919 while (from < to) {
921 unsigned this_chunk_bytes; /* # of bytes mapped */ 920 unsigned this_chunk_bytes; /* # of bytes mapped */
@@ -1104,7 +1103,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1104 unsigned blkbits = i_blkbits; 1103 unsigned blkbits = i_blkbits;
1105 unsigned blocksize_mask = (1 << blkbits) - 1; 1104 unsigned blocksize_mask = (1 << blkbits) - 1;
1106 ssize_t retval = -EINVAL; 1105 ssize_t retval = -EINVAL;
1107 loff_t end = offset + iov_iter_count(iter); 1106 size_t count = iov_iter_count(iter);
1107 loff_t end = offset + count;
1108 struct dio *dio; 1108 struct dio *dio;
1109 struct dio_submit sdio = { 0, }; 1109 struct dio_submit sdio = { 0, };
1110 struct buffer_head map_bh = { 0, }; 1110 struct buffer_head map_bh = { 0, };
@@ -1287,10 +1287,9 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1287 */ 1287 */
1288 BUG_ON(retval == -EIOCBQUEUED); 1288 BUG_ON(retval == -EIOCBQUEUED);
1289 if (dio->is_async && retval == 0 && dio->result && 1289 if (dio->is_async && retval == 0 && dio->result &&
1290 ((rw == READ) || (dio->result == sdio.size))) 1290 (rw == READ || dio->result == count))
1291 retval = -EIOCBQUEUED; 1291 retval = -EIOCBQUEUED;
1292 1292 else
1293 if (retval != -EIOCBQUEUED)
1294 dio_await_completion(dio); 1293 dio_await_completion(dio);
1295 1294
1296 if (drop_refcount(dio) == 0) { 1295 if (drop_refcount(dio) == 0) {
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 098f97bdcf1b..ca887314aba9 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -643,9 +643,8 @@ struct fuse_copy_state {
643 unsigned long seglen; 643 unsigned long seglen;
644 unsigned long addr; 644 unsigned long addr;
645 struct page *pg; 645 struct page *pg;
646 void *mapaddr;
647 void *buf;
648 unsigned len; 646 unsigned len;
647 unsigned offset;
649 unsigned move_pages:1; 648 unsigned move_pages:1;
650}; 649};
651 650
@@ -666,23 +665,17 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
666 if (cs->currbuf) { 665 if (cs->currbuf) {
667 struct pipe_buffer *buf = cs->currbuf; 666 struct pipe_buffer *buf = cs->currbuf;
668 667
669 if (!cs->write) { 668 if (cs->write)
670 kunmap_atomic(cs->mapaddr);
671 } else {
672 kunmap_atomic(cs->mapaddr);
673 buf->len = PAGE_SIZE - cs->len; 669 buf->len = PAGE_SIZE - cs->len;
674 }
675 cs->currbuf = NULL; 670 cs->currbuf = NULL;
676 cs->mapaddr = NULL; 671 } else if (cs->pg) {
677 } else if (cs->mapaddr) {
678 kunmap_atomic(cs->mapaddr);
679 if (cs->write) { 672 if (cs->write) {
680 flush_dcache_page(cs->pg); 673 flush_dcache_page(cs->pg);
681 set_page_dirty_lock(cs->pg); 674 set_page_dirty_lock(cs->pg);
682 } 675 }
683 put_page(cs->pg); 676 put_page(cs->pg);
684 cs->mapaddr = NULL;
685 } 677 }
678 cs->pg = NULL;
686} 679}
687 680
688/* 681/*
@@ -691,7 +684,7 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
691 */ 684 */
692static int fuse_copy_fill(struct fuse_copy_state *cs) 685static int fuse_copy_fill(struct fuse_copy_state *cs)
693{ 686{
694 unsigned long offset; 687 struct page *page;
695 int err; 688 int err;
696 689
697 unlock_request(cs->fc, cs->req); 690 unlock_request(cs->fc, cs->req);
@@ -706,14 +699,12 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
706 699
707 BUG_ON(!cs->nr_segs); 700 BUG_ON(!cs->nr_segs);
708 cs->currbuf = buf; 701 cs->currbuf = buf;
709 cs->mapaddr = kmap_atomic(buf->page); 702 cs->pg = buf->page;
703 cs->offset = buf->offset;
710 cs->len = buf->len; 704 cs->len = buf->len;
711 cs->buf = cs->mapaddr + buf->offset;
712 cs->pipebufs++; 705 cs->pipebufs++;
713 cs->nr_segs--; 706 cs->nr_segs--;
714 } else { 707 } else {
715 struct page *page;
716
717 if (cs->nr_segs == cs->pipe->buffers) 708 if (cs->nr_segs == cs->pipe->buffers)
718 return -EIO; 709 return -EIO;
719 710
@@ -726,8 +717,8 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
726 buf->len = 0; 717 buf->len = 0;
727 718
728 cs->currbuf = buf; 719 cs->currbuf = buf;
729 cs->mapaddr = kmap_atomic(page); 720 cs->pg = page;
730 cs->buf = cs->mapaddr; 721 cs->offset = 0;
731 cs->len = PAGE_SIZE; 722 cs->len = PAGE_SIZE;
732 cs->pipebufs++; 723 cs->pipebufs++;
733 cs->nr_segs++; 724 cs->nr_segs++;
@@ -740,14 +731,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
740 cs->iov++; 731 cs->iov++;
741 cs->nr_segs--; 732 cs->nr_segs--;
742 } 733 }
743 err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg); 734 err = get_user_pages_fast(cs->addr, 1, cs->write, &page);
744 if (err < 0) 735 if (err < 0)
745 return err; 736 return err;
746 BUG_ON(err != 1); 737 BUG_ON(err != 1);
747 offset = cs->addr % PAGE_SIZE; 738 cs->pg = page;
748 cs->mapaddr = kmap_atomic(cs->pg); 739 cs->offset = cs->addr % PAGE_SIZE;
749 cs->buf = cs->mapaddr + offset; 740 cs->len = min(PAGE_SIZE - cs->offset, cs->seglen);
750 cs->len = min(PAGE_SIZE - offset, cs->seglen);
751 cs->seglen -= cs->len; 741 cs->seglen -= cs->len;
752 cs->addr += cs->len; 742 cs->addr += cs->len;
753 } 743 }
@@ -760,15 +750,20 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
760{ 750{
761 unsigned ncpy = min(*size, cs->len); 751 unsigned ncpy = min(*size, cs->len);
762 if (val) { 752 if (val) {
753 void *pgaddr = kmap_atomic(cs->pg);
754 void *buf = pgaddr + cs->offset;
755
763 if (cs->write) 756 if (cs->write)
764 memcpy(cs->buf, *val, ncpy); 757 memcpy(buf, *val, ncpy);
765 else 758 else
766 memcpy(*val, cs->buf, ncpy); 759 memcpy(*val, buf, ncpy);
760
761 kunmap_atomic(pgaddr);
767 *val += ncpy; 762 *val += ncpy;
768 } 763 }
769 *size -= ncpy; 764 *size -= ncpy;
770 cs->len -= ncpy; 765 cs->len -= ncpy;
771 cs->buf += ncpy; 766 cs->offset += ncpy;
772 return ncpy; 767 return ncpy;
773} 768}
774 769
@@ -874,8 +869,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
874out_fallback_unlock: 869out_fallback_unlock:
875 unlock_page(newpage); 870 unlock_page(newpage);
876out_fallback: 871out_fallback:
877 cs->mapaddr = kmap_atomic(buf->page); 872 cs->pg = buf->page;
878 cs->buf = cs->mapaddr + buf->offset; 873 cs->offset = buf->offset;
879 874
880 err = lock_request(cs->fc, cs->req); 875 err = lock_request(cs->fc, cs->req);
881 if (err) 876 if (err)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 42198359fa1b..0c6048247a34 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -198,7 +198,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
198 inode = ACCESS_ONCE(entry->d_inode); 198 inode = ACCESS_ONCE(entry->d_inode);
199 if (inode && is_bad_inode(inode)) 199 if (inode && is_bad_inode(inode))
200 goto invalid; 200 goto invalid;
201 else if (fuse_dentry_time(entry) < get_jiffies_64()) { 201 else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
202 (flags & LOOKUP_REVAL)) {
202 int err; 203 int err;
203 struct fuse_entry_out outarg; 204 struct fuse_entry_out outarg;
204 struct fuse_req *req; 205 struct fuse_req *req;
@@ -814,13 +815,6 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
814 return err; 815 return err;
815} 816}
816 817
817static int fuse_rename(struct inode *olddir, struct dentry *oldent,
818 struct inode *newdir, struct dentry *newent)
819{
820 return fuse_rename_common(olddir, oldent, newdir, newent, 0,
821 FUSE_RENAME, sizeof(struct fuse_rename_in));
822}
823
824static int fuse_rename2(struct inode *olddir, struct dentry *oldent, 818static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
825 struct inode *newdir, struct dentry *newent, 819 struct inode *newdir, struct dentry *newent,
826 unsigned int flags) 820 unsigned int flags)
@@ -831,17 +825,30 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
831 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) 825 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
832 return -EINVAL; 826 return -EINVAL;
833 827
834 if (fc->no_rename2 || fc->minor < 23) 828 if (flags) {
835 return -EINVAL; 829 if (fc->no_rename2 || fc->minor < 23)
830 return -EINVAL;
836 831
837 err = fuse_rename_common(olddir, oldent, newdir, newent, flags, 832 err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
838 FUSE_RENAME2, sizeof(struct fuse_rename2_in)); 833 FUSE_RENAME2,
839 if (err == -ENOSYS) { 834 sizeof(struct fuse_rename2_in));
840 fc->no_rename2 = 1; 835 if (err == -ENOSYS) {
841 err = -EINVAL; 836 fc->no_rename2 = 1;
837 err = -EINVAL;
838 }
839 } else {
840 err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
841 FUSE_RENAME,
842 sizeof(struct fuse_rename_in));
842 } 843 }
844
843 return err; 845 return err;
846}
844 847
848static int fuse_rename(struct inode *olddir, struct dentry *oldent,
849 struct inode *newdir, struct dentry *newent)
850{
851 return fuse_rename2(olddir, oldent, newdir, newent, 0);
845} 852}
846 853
847static int fuse_link(struct dentry *entry, struct inode *newdir, 854static int fuse_link(struct dentry *entry, struct inode *newdir,
@@ -985,7 +992,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
985 int err; 992 int err;
986 bool r; 993 bool r;
987 994
988 if (fi->i_time < get_jiffies_64()) { 995 if (time_before64(fi->i_time, get_jiffies_64())) {
989 r = true; 996 r = true;
990 err = fuse_do_getattr(inode, stat, file); 997 err = fuse_do_getattr(inode, stat, file);
991 } else { 998 } else {
@@ -1171,7 +1178,7 @@ static int fuse_permission(struct inode *inode, int mask)
1171 ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) { 1178 ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
1172 struct fuse_inode *fi = get_fuse_inode(inode); 1179 struct fuse_inode *fi = get_fuse_inode(inode);
1173 1180
1174 if (fi->i_time < get_jiffies_64()) { 1181 if (time_before64(fi->i_time, get_jiffies_64())) {
1175 refreshed = true; 1182 refreshed = true;
1176 1183
1177 err = fuse_perm_getattr(inode, mask); 1184 err = fuse_perm_getattr(inode, mask);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 6e16dad13e9b..40ac2628ddcf 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1687,7 +1687,7 @@ static int fuse_writepage_locked(struct page *page)
1687 error = -EIO; 1687 error = -EIO;
1688 req->ff = fuse_write_file_get(fc, fi); 1688 req->ff = fuse_write_file_get(fc, fi);
1689 if (!req->ff) 1689 if (!req->ff)
1690 goto err_free; 1690 goto err_nofile;
1691 1691
1692 fuse_write_fill(req, req->ff, page_offset(page), 0); 1692 fuse_write_fill(req, req->ff, page_offset(page), 0);
1693 1693
@@ -1715,6 +1715,8 @@ static int fuse_writepage_locked(struct page *page)
1715 1715
1716 return 0; 1716 return 0;
1717 1717
1718err_nofile:
1719 __free_page(tmp_page);
1718err_free: 1720err_free:
1719 fuse_request_free(req); 1721 fuse_request_free(req);
1720err: 1722err:
@@ -1955,8 +1957,8 @@ static int fuse_writepages(struct address_space *mapping,
1955 data.ff = NULL; 1957 data.ff = NULL;
1956 1958
1957 err = -ENOMEM; 1959 err = -ENOMEM;
1958 data.orig_pages = kzalloc(sizeof(struct page *) * 1960 data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
1959 FUSE_MAX_PAGES_PER_REQ, 1961 sizeof(struct page *),
1960 GFP_NOFS); 1962 GFP_NOFS);
1961 if (!data.orig_pages) 1963 if (!data.orig_pages)
1962 goto out; 1964 goto out;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 754dcf23de8a..03246cd9d47a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -478,6 +478,17 @@ static const match_table_t tokens = {
478 {OPT_ERR, NULL} 478 {OPT_ERR, NULL}
479}; 479};
480 480
481static int fuse_match_uint(substring_t *s, unsigned int *res)
482{
483 int err = -ENOMEM;
484 char *buf = match_strdup(s);
485 if (buf) {
486 err = kstrtouint(buf, 10, res);
487 kfree(buf);
488 }
489 return err;
490}
491
481static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) 492static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
482{ 493{
483 char *p; 494 char *p;
@@ -488,6 +499,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
488 while ((p = strsep(&opt, ",")) != NULL) { 499 while ((p = strsep(&opt, ",")) != NULL) {
489 int token; 500 int token;
490 int value; 501 int value;
502 unsigned uv;
491 substring_t args[MAX_OPT_ARGS]; 503 substring_t args[MAX_OPT_ARGS];
492 if (!*p) 504 if (!*p)
493 continue; 505 continue;
@@ -511,18 +523,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
511 break; 523 break;
512 524
513 case OPT_USER_ID: 525 case OPT_USER_ID:
514 if (match_int(&args[0], &value)) 526 if (fuse_match_uint(&args[0], &uv))
515 return 0; 527 return 0;
516 d->user_id = make_kuid(current_user_ns(), value); 528 d->user_id = make_kuid(current_user_ns(), uv);
517 if (!uid_valid(d->user_id)) 529 if (!uid_valid(d->user_id))
518 return 0; 530 return 0;
519 d->user_id_present = 1; 531 d->user_id_present = 1;
520 break; 532 break;
521 533
522 case OPT_GROUP_ID: 534 case OPT_GROUP_ID:
523 if (match_int(&args[0], &value)) 535 if (fuse_match_uint(&args[0], &uv))
524 return 0; 536 return 0;
525 d->group_id = make_kgid(current_user_ns(), value); 537 d->group_id = make_kgid(current_user_ns(), uv);
526 if (!gid_valid(d->group_id)) 538 if (!gid_valid(d->group_id))
527 return 0; 539 return 0;
528 d->group_id_present = 1; 540 d->group_id_present = 1;
@@ -895,9 +907,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
895 fc->writeback_cache = 1; 907 fc->writeback_cache = 1;
896 if (arg->time_gran && arg->time_gran <= 1000000000) 908 if (arg->time_gran && arg->time_gran <= 1000000000)
897 fc->sb->s_time_gran = arg->time_gran; 909 fc->sb->s_time_gran = arg->time_gran;
898 else
899 fc->sb->s_time_gran = 1000000000;
900
901 } else { 910 } else {
902 ra_pages = fc->max_read / PAGE_CACHE_SIZE; 911 ra_pages = fc->max_read / PAGE_CACHE_SIZE;
903 fc->no_lock = 1; 912 fc->no_lock = 1;
@@ -926,7 +935,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
926 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | 935 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
927 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA | 936 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
928 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO | 937 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
929 FUSE_WRITEBACK_CACHE; 938 FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
930 req->in.h.opcode = FUSE_INIT; 939 req->in.h.opcode = FUSE_INIT;
931 req->in.numargs = 1; 940 req->in.numargs = 1;
932 req->in.args[0].size = sizeof(*arg); 941 req->in.args[0].size = sizeof(*arg);
@@ -1006,7 +1015,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1006 1015
1007 sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION); 1016 sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
1008 1017
1009 if (!parse_fuse_opt((char *) data, &d, is_bdev)) 1018 if (!parse_fuse_opt(data, &d, is_bdev))
1010 goto err; 1019 goto err;
1011 1020
1012 if (is_bdev) { 1021 if (is_bdev) {
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4fc3a3046174..26b3f952e6b1 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -981,7 +981,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
981 int error = 0; 981 int error = 0;
982 982
983 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; 983 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
984 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE; 984 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
985 985
986 mutex_lock(&fp->f_fl_mutex); 986 mutex_lock(&fp->f_fl_mutex);
987 987
@@ -991,7 +991,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
991 goto out; 991 goto out;
992 flock_lock_file_wait(file, 992 flock_lock_file_wait(file,
993 &(struct file_lock){.fl_type = F_UNLCK}); 993 &(struct file_lock){.fl_type = F_UNLCK});
994 gfs2_glock_dq_wait(fl_gh); 994 gfs2_glock_dq(fl_gh);
995 gfs2_holder_reinit(state, flags, fl_gh); 995 gfs2_holder_reinit(state, flags, fl_gh);
996 } else { 996 } else {
997 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr, 997 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c355f7320e44..ee4e04fe60fc 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -731,14 +731,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
731 cachep = gfs2_glock_aspace_cachep; 731 cachep = gfs2_glock_aspace_cachep;
732 else 732 else
733 cachep = gfs2_glock_cachep; 733 cachep = gfs2_glock_cachep;
734 gl = kmem_cache_alloc(cachep, GFP_KERNEL); 734 gl = kmem_cache_alloc(cachep, GFP_NOFS);
735 if (!gl) 735 if (!gl)
736 return -ENOMEM; 736 return -ENOMEM;
737 737
738 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); 738 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
739 739
740 if (glops->go_flags & GLOF_LVB) { 740 if (glops->go_flags & GLOF_LVB) {
741 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL); 741 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
742 if (!gl->gl_lksb.sb_lvbptr) { 742 if (!gl->gl_lksb.sb_lvbptr) {
743 kmem_cache_free(cachep, gl); 743 kmem_cache_free(cachep, gl);
744 return -ENOMEM; 744 return -ENOMEM;
@@ -1404,12 +1404,16 @@ __acquires(&lru_lock)
1404 gl = list_entry(list->next, struct gfs2_glock, gl_lru); 1404 gl = list_entry(list->next, struct gfs2_glock, gl_lru);
1405 list_del_init(&gl->gl_lru); 1405 list_del_init(&gl->gl_lru);
1406 if (!spin_trylock(&gl->gl_spin)) { 1406 if (!spin_trylock(&gl->gl_spin)) {
1407add_back_to_lru:
1407 list_add(&gl->gl_lru, &lru_list); 1408 list_add(&gl->gl_lru, &lru_list);
1408 atomic_inc(&lru_count); 1409 atomic_inc(&lru_count);
1409 continue; 1410 continue;
1410 } 1411 }
1412 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1413 spin_unlock(&gl->gl_spin);
1414 goto add_back_to_lru;
1415 }
1411 clear_bit(GLF_LRU, &gl->gl_flags); 1416 clear_bit(GLF_LRU, &gl->gl_flags);
1412 spin_unlock(&lru_lock);
1413 gl->gl_lockref.count++; 1417 gl->gl_lockref.count++;
1414 if (demote_ok(gl)) 1418 if (demote_ok(gl))
1415 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1419 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1417,7 +1421,7 @@ __acquires(&lru_lock)
1417 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1421 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1418 gl->gl_lockref.count--; 1422 gl->gl_lockref.count--;
1419 spin_unlock(&gl->gl_spin); 1423 spin_unlock(&gl->gl_spin);
1420 spin_lock(&lru_lock); 1424 cond_resched_lock(&lru_lock);
1421 } 1425 }
1422} 1426}
1423 1427
@@ -1442,7 +1446,7 @@ static long gfs2_scan_glock_lru(int nr)
1442 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); 1446 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1443 1447
1444 /* Test for being demotable */ 1448 /* Test for being demotable */
1445 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1449 if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
1446 list_move(&gl->gl_lru, &dispose); 1450 list_move(&gl->gl_lru, &dispose);
1447 atomic_dec(&lru_count); 1451 atomic_dec(&lru_count);
1448 freed++; 1452 freed++;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index fc1100781bbc..2ffc67dce87f 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -234,8 +234,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
234 * inode_go_inval - prepare a inode glock to be released 234 * inode_go_inval - prepare a inode glock to be released
235 * @gl: the glock 235 * @gl: the glock
236 * @flags: 236 * @flags:
237 * 237 *
238 * Normally we invlidate everything, but if we are moving into 238 * Normally we invalidate everything, but if we are moving into
239 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we 239 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
240 * can keep hold of the metadata, since it won't have changed. 240 * can keep hold of the metadata, since it won't have changed.
241 * 241 *
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 91f274de1246..4fafea1c9ecf 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1036,8 +1036,8 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
1036 1036
1037 new_size = old_size + RECOVER_SIZE_INC; 1037 new_size = old_size + RECOVER_SIZE_INC;
1038 1038
1039 submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS); 1039 submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
1040 result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS); 1040 result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
1041 if (!submit || !result) { 1041 if (!submit || !result) {
1042 kfree(submit); 1042 kfree(submit);
1043 kfree(result); 1043 kfree(result);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index db629d1bd1bd..f4cb9c0d6bbd 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -337,7 +337,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
337 337
338/** 338/**
339 * gfs2_free_extlen - Return extent length of free blocks 339 * gfs2_free_extlen - Return extent length of free blocks
340 * @rbm: Starting position 340 * @rrbm: Starting position
341 * @len: Max length to check 341 * @len: Max length to check
342 * 342 *
343 * Starting at the block specified by the rbm, see how many free blocks 343 * Starting at the block specified by the rbm, see how many free blocks
@@ -2522,7 +2522,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
2522 2522
2523/** 2523/**
2524 * gfs2_rlist_free - free a resource group list 2524 * gfs2_rlist_free - free a resource group list
2525 * @list: the list of resource groups 2525 * @rlist: the list of resource groups
2526 * 2526 *
2527 */ 2527 */
2528 2528
diff --git a/fs/namei.c b/fs/namei.c
index 005771f97189..8ae644c1150f 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2256,9 +2256,10 @@ done:
2256 goto out; 2256 goto out;
2257 } 2257 }
2258 path->dentry = dentry; 2258 path->dentry = dentry;
2259 path->mnt = mntget(nd->path.mnt); 2259 path->mnt = nd->path.mnt;
2260 if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW)) 2260 if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
2261 return 1; 2261 return 1;
2262 mntget(path->mnt);
2262 follow_mount(path); 2263 follow_mount(path);
2263 error = 0; 2264 error = 0;
2264out: 2265out:
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 8f98138cbc43..f11b9eed0de1 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -756,7 +756,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
756 spin_unlock(&dreq->lock); 756 spin_unlock(&dreq->lock);
757 757
758 while (!list_empty(&hdr->pages)) { 758 while (!list_empty(&hdr->pages)) {
759 bool do_destroy = true;
760 759
761 req = nfs_list_entry(hdr->pages.next); 760 req = nfs_list_entry(hdr->pages.next);
762 nfs_list_remove_request(req); 761 nfs_list_remove_request(req);
@@ -765,7 +764,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
765 case NFS_IOHDR_NEED_COMMIT: 764 case NFS_IOHDR_NEED_COMMIT:
766 kref_get(&req->wb_kref); 765 kref_get(&req->wb_kref);
767 nfs_mark_request_commit(req, hdr->lseg, &cinfo); 766 nfs_mark_request_commit(req, hdr->lseg, &cinfo);
768 do_destroy = false;
769 } 767 }
770 nfs_unlock_and_release_request(req); 768 nfs_unlock_and_release_request(req);
771 } 769 }
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 82ddbf46660e..f415cbf9f6c3 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -244,6 +244,7 @@ void nfs_pgio_data_release(struct nfs_pgio_data *);
244int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *); 244int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
245int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *, 245int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *,
246 const struct rpc_call_ops *, int, int); 246 const struct rpc_call_ops *, int, int);
247void nfs_free_request(struct nfs_page *req);
247 248
248static inline void nfs_iocounter_init(struct nfs_io_counter *c) 249static inline void nfs_iocounter_init(struct nfs_io_counter *c)
249{ 250{
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 871d6eda8dba..8f854dde4150 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -247,3 +247,46 @@ const struct xattr_handler *nfs3_xattr_handlers[] = {
247 &posix_acl_default_xattr_handler, 247 &posix_acl_default_xattr_handler,
248 NULL, 248 NULL,
249}; 249};
250
251static int
252nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
253 size_t size, ssize_t *result)
254{
255 struct posix_acl *acl;
256 char *p = data + *result;
257
258 acl = get_acl(inode, type);
259 if (!acl)
260 return 0;
261
262 posix_acl_release(acl);
263
264 *result += strlen(name);
265 *result += 1;
266 if (!size)
267 return 0;
268 if (*result > size)
269 return -ERANGE;
270
271 strcpy(p, name);
272 return 0;
273}
274
275ssize_t
276nfs3_listxattr(struct dentry *dentry, char *data, size_t size)
277{
278 struct inode *inode = dentry->d_inode;
279 ssize_t result = 0;
280 int error;
281
282 error = nfs3_list_one_acl(inode, ACL_TYPE_ACCESS,
283 POSIX_ACL_XATTR_ACCESS, data, size, &result);
284 if (error)
285 return error;
286
287 error = nfs3_list_one_acl(inode, ACL_TYPE_DEFAULT,
288 POSIX_ACL_XATTR_DEFAULT, data, size, &result);
289 if (error)
290 return error;
291 return result;
292}
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index e7daa42bbc86..f0afa291fd58 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -885,7 +885,7 @@ static const struct inode_operations nfs3_dir_inode_operations = {
885 .getattr = nfs_getattr, 885 .getattr = nfs_getattr,
886 .setattr = nfs_setattr, 886 .setattr = nfs_setattr,
887#ifdef CONFIG_NFS_V3_ACL 887#ifdef CONFIG_NFS_V3_ACL
888 .listxattr = generic_listxattr, 888 .listxattr = nfs3_listxattr,
889 .getxattr = generic_getxattr, 889 .getxattr = generic_getxattr,
890 .setxattr = generic_setxattr, 890 .setxattr = generic_setxattr,
891 .removexattr = generic_removexattr, 891 .removexattr = generic_removexattr,
@@ -899,7 +899,7 @@ static const struct inode_operations nfs3_file_inode_operations = {
899 .getattr = nfs_getattr, 899 .getattr = nfs_getattr,
900 .setattr = nfs_setattr, 900 .setattr = nfs_setattr,
901#ifdef CONFIG_NFS_V3_ACL 901#ifdef CONFIG_NFS_V3_ACL
902 .listxattr = generic_listxattr, 902 .listxattr = nfs3_listxattr,
903 .getxattr = generic_getxattr, 903 .getxattr = generic_getxattr,
904 .setxattr = generic_setxattr, 904 .setxattr = generic_setxattr,
905 .removexattr = generic_removexattr, 905 .removexattr = generic_removexattr,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b6ee3a6ee96d..17fab89f6358 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -29,8 +29,6 @@
29static struct kmem_cache *nfs_page_cachep; 29static struct kmem_cache *nfs_page_cachep;
30static const struct rpc_call_ops nfs_pgio_common_ops; 30static const struct rpc_call_ops nfs_pgio_common_ops;
31 31
32static void nfs_free_request(struct nfs_page *);
33
34static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount) 32static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
35{ 33{
36 p->npages = pagecount; 34 p->npages = pagecount;
@@ -239,20 +237,28 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
239 WARN_ON_ONCE(prev == req); 237 WARN_ON_ONCE(prev == req);
240 238
241 if (!prev) { 239 if (!prev) {
240 /* a head request */
242 req->wb_head = req; 241 req->wb_head = req;
243 req->wb_this_page = req; 242 req->wb_this_page = req;
244 } else { 243 } else {
244 /* a subrequest */
245 WARN_ON_ONCE(prev->wb_this_page != prev->wb_head); 245 WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
246 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags)); 246 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
247 req->wb_head = prev->wb_head; 247 req->wb_head = prev->wb_head;
248 req->wb_this_page = prev->wb_this_page; 248 req->wb_this_page = prev->wb_this_page;
249 prev->wb_this_page = req; 249 prev->wb_this_page = req;
250 250
251 /* All subrequests take a ref on the head request until
252 * nfs_page_group_destroy is called */
253 kref_get(&req->wb_head->wb_kref);
254
251 /* grab extra ref if head request has extra ref from 255 /* grab extra ref if head request has extra ref from
252 * the write/commit path to handle handoff between write 256 * the write/commit path to handle handoff between write
253 * and commit lists */ 257 * and commit lists */
254 if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) 258 if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
259 set_bit(PG_INODE_REF, &req->wb_flags);
255 kref_get(&req->wb_kref); 260 kref_get(&req->wb_kref);
261 }
256 } 262 }
257} 263}
258 264
@@ -269,6 +275,10 @@ nfs_page_group_destroy(struct kref *kref)
269 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); 275 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
270 struct nfs_page *tmp, *next; 276 struct nfs_page *tmp, *next;
271 277
278 /* subrequests must release the ref on the head request */
279 if (req->wb_head != req)
280 nfs_release_request(req->wb_head);
281
272 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN)) 282 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
273 return; 283 return;
274 284
@@ -394,7 +404,7 @@ static void nfs_clear_request(struct nfs_page *req)
394 * 404 *
395 * Note: Should never be called with the spinlock held! 405 * Note: Should never be called with the spinlock held!
396 */ 406 */
397static void nfs_free_request(struct nfs_page *req) 407void nfs_free_request(struct nfs_page *req)
398{ 408{
399 WARN_ON_ONCE(req->wb_this_page != req); 409 WARN_ON_ONCE(req->wb_this_page != req);
400 410
@@ -925,7 +935,6 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
925 nfs_pageio_doio(desc); 935 nfs_pageio_doio(desc);
926 if (desc->pg_error < 0) 936 if (desc->pg_error < 0)
927 return 0; 937 return 0;
928 desc->pg_moreio = 0;
929 if (desc->pg_recoalesce) 938 if (desc->pg_recoalesce)
930 return 0; 939 return 0;
931 /* retry add_request for this subreq */ 940 /* retry add_request for this subreq */
@@ -972,6 +981,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
972 desc->pg_count = 0; 981 desc->pg_count = 0;
973 desc->pg_base = 0; 982 desc->pg_base = 0;
974 desc->pg_recoalesce = 0; 983 desc->pg_recoalesce = 0;
984 desc->pg_moreio = 0;
975 985
976 while (!list_empty(&head)) { 986 while (!list_empty(&head)) {
977 struct nfs_page *req; 987 struct nfs_page *req;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 98ff061ccaf3..5e2f10304548 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -46,6 +46,7 @@ static const struct rpc_call_ops nfs_commit_ops;
46static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; 46static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
47static const struct nfs_commit_completion_ops nfs_commit_completion_ops; 47static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
48static const struct nfs_rw_ops nfs_rw_write_ops; 48static const struct nfs_rw_ops nfs_rw_write_ops;
49static void nfs_clear_request_commit(struct nfs_page *req);
49 50
50static struct kmem_cache *nfs_wdata_cachep; 51static struct kmem_cache *nfs_wdata_cachep;
51static mempool_t *nfs_wdata_mempool; 52static mempool_t *nfs_wdata_mempool;
@@ -91,8 +92,15 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
91 set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); 92 set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
92} 93}
93 94
95/*
96 * nfs_page_find_head_request_locked - find head request associated with @page
97 *
98 * must be called while holding the inode lock.
99 *
100 * returns matching head request with reference held, or NULL if not found.
101 */
94static struct nfs_page * 102static struct nfs_page *
95nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page) 103nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
96{ 104{
97 struct nfs_page *req = NULL; 105 struct nfs_page *req = NULL;
98 106
@@ -104,25 +112,33 @@ nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
104 /* Linearly search the commit list for the correct req */ 112 /* Linearly search the commit list for the correct req */
105 list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) { 113 list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
106 if (freq->wb_page == page) { 114 if (freq->wb_page == page) {
107 req = freq; 115 req = freq->wb_head;
108 break; 116 break;
109 } 117 }
110 } 118 }
111 } 119 }
112 120
113 if (req) 121 if (req) {
122 WARN_ON_ONCE(req->wb_head != req);
123
114 kref_get(&req->wb_kref); 124 kref_get(&req->wb_kref);
125 }
115 126
116 return req; 127 return req;
117} 128}
118 129
119static struct nfs_page *nfs_page_find_request(struct page *page) 130/*
131 * nfs_page_find_head_request - find head request associated with @page
132 *
133 * returns matching head request with reference held, or NULL if not found.
134 */
135static struct nfs_page *nfs_page_find_head_request(struct page *page)
120{ 136{
121 struct inode *inode = page_file_mapping(page)->host; 137 struct inode *inode = page_file_mapping(page)->host;
122 struct nfs_page *req = NULL; 138 struct nfs_page *req = NULL;
123 139
124 spin_lock(&inode->i_lock); 140 spin_lock(&inode->i_lock);
125 req = nfs_page_find_request_locked(NFS_I(inode), page); 141 req = nfs_page_find_head_request_locked(NFS_I(inode), page);
126 spin_unlock(&inode->i_lock); 142 spin_unlock(&inode->i_lock);
127 return req; 143 return req;
128} 144}
@@ -274,36 +290,246 @@ static void nfs_end_page_writeback(struct nfs_page *req)
274 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); 290 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
275} 291}
276 292
277static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock) 293
294/* nfs_page_group_clear_bits
295 * @req - an nfs request
296 * clears all page group related bits from @req
297 */
298static void
299nfs_page_group_clear_bits(struct nfs_page *req)
300{
301 clear_bit(PG_TEARDOWN, &req->wb_flags);
302 clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
303 clear_bit(PG_UPTODATE, &req->wb_flags);
304 clear_bit(PG_WB_END, &req->wb_flags);
305 clear_bit(PG_REMOVE, &req->wb_flags);
306}
307
308
309/*
310 * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req
311 *
312 * this is a helper function for nfs_lock_and_join_requests
313 *
314 * @inode - inode associated with request page group, must be holding inode lock
315 * @head - head request of page group, must be holding head lock
316 * @req - request that couldn't lock and needs to wait on the req bit lock
317 * @nonblock - if true, don't actually wait
318 *
319 * NOTE: this must be called holding page_group bit lock and inode spin lock
320 * and BOTH will be released before returning.
321 *
322 * returns 0 on success, < 0 on error.
323 */
324static int
325nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
326 struct nfs_page *req, bool nonblock)
327 __releases(&inode->i_lock)
328{
329 struct nfs_page *tmp;
330 int ret;
331
332 /* relinquish all the locks successfully grabbed this run */
333 for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
334 nfs_unlock_request(tmp);
335
336 WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
337
338 /* grab a ref on the request that will be waited on */
339 kref_get(&req->wb_kref);
340
341 nfs_page_group_unlock(head);
342 spin_unlock(&inode->i_lock);
343
344 /* release ref from nfs_page_find_head_request_locked */
345 nfs_release_request(head);
346
347 if (!nonblock)
348 ret = nfs_wait_on_request(req);
349 else
350 ret = -EAGAIN;
351 nfs_release_request(req);
352
353 return ret;
354}
355
356/*
357 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
358 *
359 * @destroy_list - request list (using wb_this_page) terminated by @old_head
360 * @old_head - the old head of the list
361 *
362 * All subrequests must be locked and removed from all lists, so at this point
363 * they are only "active" in this function, and possibly in nfs_wait_on_request
364 * with a reference held by some other context.
365 */
366static void
367nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
368 struct nfs_page *old_head)
369{
370 while (destroy_list) {
371 struct nfs_page *subreq = destroy_list;
372
373 destroy_list = (subreq->wb_this_page == old_head) ?
374 NULL : subreq->wb_this_page;
375
376 WARN_ON_ONCE(old_head != subreq->wb_head);
377
378 /* make sure old group is not used */
379 subreq->wb_head = subreq;
380 subreq->wb_this_page = subreq;
381
382 nfs_clear_request_commit(subreq);
383
384 /* subreq is now totally disconnected from page group or any
385 * write / commit lists. last chance to wake any waiters */
386 nfs_unlock_request(subreq);
387
388 if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
389 /* release ref on old head request */
390 nfs_release_request(old_head);
391
392 nfs_page_group_clear_bits(subreq);
393
394 /* release the PG_INODE_REF reference */
395 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
396 nfs_release_request(subreq);
397 else
398 WARN_ON_ONCE(1);
399 } else {
400 WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
401 /* zombie requests have already released the last
402 * reference and were waiting on the rest of the
403 * group to complete. Since it's no longer part of a
404 * group, simply free the request */
405 nfs_page_group_clear_bits(subreq);
406 nfs_free_request(subreq);
407 }
408 }
409}
410
411/*
412 * nfs_lock_and_join_requests - join all subreqs to the head req and return
413 * a locked reference, cancelling any pending
414 * operations for this page.
415 *
416 * @page - the page used to lookup the "page group" of nfs_page structures
417 * @nonblock - if true, don't block waiting for request locks
418 *
419 * This function joins all sub requests to the head request by first
420 * locking all requests in the group, cancelling any pending operations
421 * and finally updating the head request to cover the whole range covered by
422 * the (former) group. All subrequests are removed from any write or commit
423 * lists, unlinked from the group and destroyed.
424 *
425 * Returns a locked, referenced pointer to the head request - which after
426 * this call is guaranteed to be the only request associated with the page.
427 * Returns NULL if no requests are found for @page, or a ERR_PTR if an
428 * error was encountered.
429 */
430static struct nfs_page *
431nfs_lock_and_join_requests(struct page *page, bool nonblock)
278{ 432{
279 struct inode *inode = page_file_mapping(page)->host; 433 struct inode *inode = page_file_mapping(page)->host;
280 struct nfs_page *req; 434 struct nfs_page *head, *subreq;
435 struct nfs_page *destroy_list = NULL;
436 unsigned int total_bytes;
281 int ret; 437 int ret;
282 438
439try_again:
440 total_bytes = 0;
441
442 WARN_ON_ONCE(destroy_list);
443
283 spin_lock(&inode->i_lock); 444 spin_lock(&inode->i_lock);
284 for (;;) { 445
285 req = nfs_page_find_request_locked(NFS_I(inode), page); 446 /*
286 if (req == NULL) 447 * A reference is taken only on the head request which acts as a
287 break; 448 * reference to the whole page group - the group will not be destroyed
288 if (nfs_lock_request(req)) 449 * until the head reference is released.
289 break; 450 */
290 /* Note: If we hold the page lock, as is the case in nfs_writepage, 451 head = nfs_page_find_head_request_locked(NFS_I(inode), page);
291 * then the call to nfs_lock_request() will always 452
292 * succeed provided that someone hasn't already marked the 453 if (!head) {
293 * request as dirty (in which case we don't care).
294 */
295 spin_unlock(&inode->i_lock); 454 spin_unlock(&inode->i_lock);
296 if (!nonblock) 455 return NULL;
297 ret = nfs_wait_on_request(req); 456 }
298 else 457
299 ret = -EAGAIN; 458 /* lock each request in the page group */
300 nfs_release_request(req); 459 nfs_page_group_lock(head);
301 if (ret != 0) 460 subreq = head;
461 do {
462 /*
463 * Subrequests are always contiguous, non overlapping
464 * and in order. If not, it's a programming error.
465 */
466 WARN_ON_ONCE(subreq->wb_offset !=
467 (head->wb_offset + total_bytes));
468
469 /* keep track of how many bytes this group covers */
470 total_bytes += subreq->wb_bytes;
471
472 if (!nfs_lock_request(subreq)) {
473 /* releases page group bit lock and
474 * inode spin lock and all references */
475 ret = nfs_unroll_locks_and_wait(inode, head,
476 subreq, nonblock);
477
478 if (ret == 0)
479 goto try_again;
480
302 return ERR_PTR(ret); 481 return ERR_PTR(ret);
303 spin_lock(&inode->i_lock); 482 }
483
484 subreq = subreq->wb_this_page;
485 } while (subreq != head);
486
487 /* Now that all requests are locked, make sure they aren't on any list.
488 * Commit list removal accounting is done after locks are dropped */
489 subreq = head;
490 do {
491 nfs_list_remove_request(subreq);
492 subreq = subreq->wb_this_page;
493 } while (subreq != head);
494
495 /* unlink subrequests from head, destroy them later */
496 if (head->wb_this_page != head) {
497 /* destroy list will be terminated by head */
498 destroy_list = head->wb_this_page;
499 head->wb_this_page = head;
500
501 /* change head request to cover whole range that
502 * the former page group covered */
503 head->wb_bytes = total_bytes;
304 } 504 }
505
506 /*
507 * prepare head request to be added to new pgio descriptor
508 */
509 nfs_page_group_clear_bits(head);
510
511 /*
512 * some part of the group was still on the inode list - otherwise
513 * the group wouldn't be involved in async write.
514 * grab a reference for the head request, iff it needs one.
515 */
516 if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
517 kref_get(&head->wb_kref);
518
519 nfs_page_group_unlock(head);
520
521 /* drop lock to clear_request_commit the head req and clean up
522 * requests on destroy list */
305 spin_unlock(&inode->i_lock); 523 spin_unlock(&inode->i_lock);
306 return req; 524
525 nfs_destroy_unlinked_subrequests(destroy_list, head);
526
527 /* clean up commit list state */
528 nfs_clear_request_commit(head);
529
530 /* still holds ref on head from nfs_page_find_head_request_locked
531 * and still has lock on head from lock loop */
532 return head;
307} 533}
308 534
309/* 535/*
@@ -316,7 +542,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
316 struct nfs_page *req; 542 struct nfs_page *req;
317 int ret = 0; 543 int ret = 0;
318 544
319 req = nfs_find_and_lock_request(page, nonblock); 545 req = nfs_lock_and_join_requests(page, nonblock);
320 if (!req) 546 if (!req)
321 goto out; 547 goto out;
322 ret = PTR_ERR(req); 548 ret = PTR_ERR(req);
@@ -448,7 +674,9 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
448 set_page_private(req->wb_page, (unsigned long)req); 674 set_page_private(req->wb_page, (unsigned long)req);
449 } 675 }
450 nfsi->npages++; 676 nfsi->npages++;
451 set_bit(PG_INODE_REF, &req->wb_flags); 677 /* this a head request for a page group - mark it as having an
678 * extra reference so sub groups can follow suit */
679 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
452 kref_get(&req->wb_kref); 680 kref_get(&req->wb_kref);
453 spin_unlock(&inode->i_lock); 681 spin_unlock(&inode->i_lock);
454} 682}
@@ -474,7 +702,9 @@ static void nfs_inode_remove_request(struct nfs_page *req)
474 nfsi->npages--; 702 nfsi->npages--;
475 spin_unlock(&inode->i_lock); 703 spin_unlock(&inode->i_lock);
476 } 704 }
477 nfs_release_request(req); 705
706 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
707 nfs_release_request(req);
478} 708}
479 709
480static void 710static void
@@ -638,7 +868,6 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
638{ 868{
639 struct nfs_commit_info cinfo; 869 struct nfs_commit_info cinfo;
640 unsigned long bytes = 0; 870 unsigned long bytes = 0;
641 bool do_destroy;
642 871
643 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) 872 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
644 goto out; 873 goto out;
@@ -668,7 +897,6 @@ remove_req:
668next: 897next:
669 nfs_unlock_request(req); 898 nfs_unlock_request(req);
670 nfs_end_page_writeback(req); 899 nfs_end_page_writeback(req);
671 do_destroy = !test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags);
672 nfs_release_request(req); 900 nfs_release_request(req);
673 } 901 }
674out: 902out:
@@ -769,7 +997,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
769 spin_lock(&inode->i_lock); 997 spin_lock(&inode->i_lock);
770 998
771 for (;;) { 999 for (;;) {
772 req = nfs_page_find_request_locked(NFS_I(inode), page); 1000 req = nfs_page_find_head_request_locked(NFS_I(inode), page);
773 if (req == NULL) 1001 if (req == NULL)
774 goto out_unlock; 1002 goto out_unlock;
775 1003
@@ -877,7 +1105,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
877 * dropped page. 1105 * dropped page.
878 */ 1106 */
879 do { 1107 do {
880 req = nfs_page_find_request(page); 1108 req = nfs_page_find_head_request(page);
881 if (req == NULL) 1109 if (req == NULL)
882 return 0; 1110 return 0;
883 l_ctx = req->wb_lock_context; 1111 l_ctx = req->wb_lock_context;
@@ -1569,27 +1797,28 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1569 struct nfs_page *req; 1797 struct nfs_page *req;
1570 int ret = 0; 1798 int ret = 0;
1571 1799
1572 for (;;) { 1800 wait_on_page_writeback(page);
1573 wait_on_page_writeback(page); 1801
1574 req = nfs_page_find_request(page); 1802 /* blocking call to cancel all requests and join to a single (head)
1575 if (req == NULL) 1803 * request */
1576 break; 1804 req = nfs_lock_and_join_requests(page, false);
1577 if (nfs_lock_request(req)) { 1805
1578 nfs_clear_request_commit(req); 1806 if (IS_ERR(req)) {
1579 nfs_inode_remove_request(req); 1807 ret = PTR_ERR(req);
1580 /* 1808 } else if (req) {
1581 * In case nfs_inode_remove_request has marked the 1809 /* all requests from this page have been cancelled by
1582 * page as being dirty 1810 * nfs_lock_and_join_requests, so just remove the head
1583 */ 1811 * request from the inode / page_private pointer and
1584 cancel_dirty_page(page, PAGE_CACHE_SIZE); 1812 * release it */
1585 nfs_unlock_and_release_request(req); 1813 nfs_inode_remove_request(req);
1586 break; 1814 /*
1587 } 1815 * In case nfs_inode_remove_request has marked the
1588 ret = nfs_wait_on_request(req); 1816 * page as being dirty
1589 nfs_release_request(req); 1817 */
1590 if (ret < 0) 1818 cancel_dirty_page(page, PAGE_CACHE_SIZE);
1591 break; 1819 nfs_unlock_and_release_request(req);
1592 } 1820 }
1821
1593 return ret; 1822 return ret;
1594} 1823}
1595 1824
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b56b1cc02718..944275c8f56d 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2879,6 +2879,7 @@ again:
2879 * return the conflicting open: 2879 * return the conflicting open:
2880 */ 2880 */
2881 if (conf->len) { 2881 if (conf->len) {
2882 kfree(conf->data);
2882 conf->len = 0; 2883 conf->len = 0;
2883 conf->data = NULL; 2884 conf->data = NULL;
2884 goto again; 2885 goto again;
@@ -2891,6 +2892,7 @@ again:
2891 if (conf->len) { 2892 if (conf->len) {
2892 p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8); 2893 p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
2893 p = xdr_encode_opaque(p, conf->data, conf->len); 2894 p = xdr_encode_opaque(p, conf->data, conf->len);
2895 kfree(conf->data);
2894 } else { /* non - nfsv4 lock in conflict, no clientid nor owner */ 2896 } else { /* non - nfsv4 lock in conflict, no clientid nor owner */
2895 p = xdr_encode_hyper(p, (u64)0); /* clientid */ 2897 p = xdr_encode_hyper(p, (u64)0); /* clientid */
2896 *p++ = cpu_to_be32(0); /* length of owner name */ 2898 *p++ = cpu_to_be32(0); /* length of owner name */
@@ -2907,7 +2909,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo
2907 nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid); 2909 nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
2908 else if (nfserr == nfserr_denied) 2910 else if (nfserr == nfserr_denied)
2909 nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied); 2911 nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
2910 kfree(lock->lk_denied.ld_owner.data); 2912
2911 return nfserr; 2913 return nfserr;
2912} 2914}
2913 2915
diff --git a/fs/open.c b/fs/open.c
index 36662d036237..d6fd3acde134 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -263,11 +263,10 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
263 return -EPERM; 263 return -EPERM;
264 264
265 /* 265 /*
266 * We can not allow to do any fallocate operation on an active 266 * We cannot allow any fallocate operation on an active swapfile
267 * swapfile
268 */ 267 */
269 if (IS_SWAPFILE(inode)) 268 if (IS_SWAPFILE(inode))
270 ret = -ETXTBSY; 269 return -ETXTBSY;
271 270
272 /* 271 /*
273 * Revalidate the write permissions, in case security policy has 272 * Revalidate the write permissions, in case security policy has
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 9cd5f63715c0..7f30bdc57d13 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -702,6 +702,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
702 struct dquot *dquot; 702 struct dquot *dquot;
703 unsigned long freed = 0; 703 unsigned long freed = 0;
704 704
705 spin_lock(&dq_list_lock);
705 head = free_dquots.prev; 706 head = free_dquots.prev;
706 while (head != &free_dquots && sc->nr_to_scan) { 707 while (head != &free_dquots && sc->nr_to_scan) {
707 dquot = list_entry(head, struct dquot, dq_free); 708 dquot = list_entry(head, struct dquot, dq_free);
@@ -713,6 +714,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
713 freed++; 714 freed++;
714 head = free_dquots.prev; 715 head = free_dquots.prev;
715 } 716 }
717 spin_unlock(&dq_list_lock);
716 return freed; 718 return freed;
717} 719}
718 720
diff --git a/fs/xattr.c b/fs/xattr.c
index 3377dff18404..c69e6d43a0d2 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -843,7 +843,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
843 843
844 /* wrap around? */ 844 /* wrap around? */
845 len = sizeof(*new_xattr) + size; 845 len = sizeof(*new_xattr) + size;
846 if (len <= sizeof(*new_xattr)) 846 if (len < sizeof(*new_xattr))
847 return NULL; 847 return NULL;
848 848
849 new_xattr = kmalloc(len, GFP_KERNEL); 849 new_xattr = kmalloc(len, GFP_KERNEL);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 96175df211b1..75c3fe5f3d9d 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -4298,8 +4298,8 @@ xfs_bmapi_delay(
4298} 4298}
4299 4299
4300 4300
4301int 4301static int
4302__xfs_bmapi_allocate( 4302xfs_bmapi_allocate(
4303 struct xfs_bmalloca *bma) 4303 struct xfs_bmalloca *bma)
4304{ 4304{
4305 struct xfs_mount *mp = bma->ip->i_mount; 4305 struct xfs_mount *mp = bma->ip->i_mount;
@@ -4578,9 +4578,6 @@ xfs_bmapi_write(
4578 bma.flist = flist; 4578 bma.flist = flist;
4579 bma.firstblock = firstblock; 4579 bma.firstblock = firstblock;
4580 4580
4581 if (flags & XFS_BMAPI_STACK_SWITCH)
4582 bma.stack_switch = 1;
4583
4584 while (bno < end && n < *nmap) { 4581 while (bno < end && n < *nmap) {
4585 inhole = eof || bma.got.br_startoff > bno; 4582 inhole = eof || bma.got.br_startoff > bno;
4586 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock); 4583 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 38ba36e9b2f0..b879ca56a64c 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -77,7 +77,6 @@ typedef struct xfs_bmap_free
77 * from written to unwritten, otherwise convert from unwritten to written. 77 * from written to unwritten, otherwise convert from unwritten to written.
78 */ 78 */
79#define XFS_BMAPI_CONVERT 0x040 79#define XFS_BMAPI_CONVERT 0x040
80#define XFS_BMAPI_STACK_SWITCH 0x080
81 80
82#define XFS_BMAPI_FLAGS \ 81#define XFS_BMAPI_FLAGS \
83 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ 82 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
@@ -86,8 +85,7 @@ typedef struct xfs_bmap_free
86 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ 85 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \
87 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ 86 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \
88 { XFS_BMAPI_CONTIG, "CONTIG" }, \ 87 { XFS_BMAPI_CONTIG, "CONTIG" }, \
89 { XFS_BMAPI_CONVERT, "CONVERT" }, \ 88 { XFS_BMAPI_CONVERT, "CONVERT" }
90 { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" }
91 89
92 90
93static inline int xfs_bmapi_aflag(int w) 91static inline int xfs_bmapi_aflag(int w)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 703b3ec1796c..64731ef3324d 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -249,59 +249,6 @@ xfs_bmap_rtalloc(
249} 249}
250 250
251/* 251/*
252 * Stack switching interfaces for allocation
253 */
254static void
255xfs_bmapi_allocate_worker(
256 struct work_struct *work)
257{
258 struct xfs_bmalloca *args = container_of(work,
259 struct xfs_bmalloca, work);
260 unsigned long pflags;
261 unsigned long new_pflags = PF_FSTRANS;
262
263 /*
264 * we are in a transaction context here, but may also be doing work
265 * in kswapd context, and hence we may need to inherit that state
266 * temporarily to ensure that we don't block waiting for memory reclaim
267 * in any way.
268 */
269 if (args->kswapd)
270 new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
271
272 current_set_flags_nested(&pflags, new_pflags);
273
274 args->result = __xfs_bmapi_allocate(args);
275 complete(args->done);
276
277 current_restore_flags_nested(&pflags, new_pflags);
278}
279
280/*
281 * Some allocation requests often come in with little stack to work on. Push
282 * them off to a worker thread so there is lots of stack to use. Otherwise just
283 * call directly to avoid the context switch overhead here.
284 */
285int
286xfs_bmapi_allocate(
287 struct xfs_bmalloca *args)
288{
289 DECLARE_COMPLETION_ONSTACK(done);
290
291 if (!args->stack_switch)
292 return __xfs_bmapi_allocate(args);
293
294
295 args->done = &done;
296 args->kswapd = current_is_kswapd();
297 INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
298 queue_work(xfs_alloc_wq, &args->work);
299 wait_for_completion(&done);
300 destroy_work_on_stack(&args->work);
301 return args->result;
302}
303
304/*
305 * Check if the endoff is outside the last extent. If so the caller will grow 252 * Check if the endoff is outside the last extent. If so the caller will grow
306 * the allocation to a stripe unit boundary. All offsets are considered outside 253 * the allocation to a stripe unit boundary. All offsets are considered outside
307 * the end of file for an empty fork, so 1 is returned in *eof in that case. 254 * the end of file for an empty fork, so 1 is returned in *eof in that case.
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 075f72232a64..2fdb72d2c908 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -55,8 +55,6 @@ struct xfs_bmalloca {
55 bool userdata;/* set if is user data */ 55 bool userdata;/* set if is user data */
56 bool aeof; /* allocated space at eof */ 56 bool aeof; /* allocated space at eof */
57 bool conv; /* overwriting unwritten extents */ 57 bool conv; /* overwriting unwritten extents */
58 bool stack_switch;
59 bool kswapd; /* allocation in kswapd context */
60 int flags; 58 int flags;
61 struct completion *done; 59 struct completion *done;
62 struct work_struct work; 60 struct work_struct work;
@@ -66,8 +64,6 @@ struct xfs_bmalloca {
66int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist, 64int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
67 int *committed); 65 int *committed);
68int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); 66int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
69int xfs_bmapi_allocate(struct xfs_bmalloca *args);
70int __xfs_bmapi_allocate(struct xfs_bmalloca *args);
71int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, 67int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
72 int whichfork, int *eof); 68 int whichfork, int *eof);
73int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip, 69int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index bf810c6baf2b..cf893bc1e373 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -33,6 +33,7 @@
33#include "xfs_error.h" 33#include "xfs_error.h"
34#include "xfs_trace.h" 34#include "xfs_trace.h"
35#include "xfs_cksum.h" 35#include "xfs_cksum.h"
36#include "xfs_alloc.h"
36 37
37/* 38/*
38 * Cursor allocation zone. 39 * Cursor allocation zone.
@@ -2323,7 +2324,7 @@ error1:
2323 * record (to be inserted into parent). 2324 * record (to be inserted into parent).
2324 */ 2325 */
2325STATIC int /* error */ 2326STATIC int /* error */
2326xfs_btree_split( 2327__xfs_btree_split(
2327 struct xfs_btree_cur *cur, 2328 struct xfs_btree_cur *cur,
2328 int level, 2329 int level,
2329 union xfs_btree_ptr *ptrp, 2330 union xfs_btree_ptr *ptrp,
@@ -2503,6 +2504,85 @@ error0:
2503 return error; 2504 return error;
2504} 2505}
2505 2506
2507struct xfs_btree_split_args {
2508 struct xfs_btree_cur *cur;
2509 int level;
2510 union xfs_btree_ptr *ptrp;
2511 union xfs_btree_key *key;
2512 struct xfs_btree_cur **curp;
2513 int *stat; /* success/failure */
2514 int result;
2515 bool kswapd; /* allocation in kswapd context */
2516 struct completion *done;
2517 struct work_struct work;
2518};
2519
2520/*
2521 * Stack switching interfaces for allocation
2522 */
2523static void
2524xfs_btree_split_worker(
2525 struct work_struct *work)
2526{
2527 struct xfs_btree_split_args *args = container_of(work,
2528 struct xfs_btree_split_args, work);
2529 unsigned long pflags;
2530 unsigned long new_pflags = PF_FSTRANS;
2531
2532 /*
2533 * we are in a transaction context here, but may also be doing work
2534 * in kswapd context, and hence we may need to inherit that state
2535 * temporarily to ensure that we don't block waiting for memory reclaim
2536 * in any way.
2537 */
2538 if (args->kswapd)
2539 new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2540
2541 current_set_flags_nested(&pflags, new_pflags);
2542
2543 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
2544 args->key, args->curp, args->stat);
2545 complete(args->done);
2546
2547 current_restore_flags_nested(&pflags, new_pflags);
2548}
2549
2550/*
2551 * BMBT split requests often come in with little stack to work on. Push
2552 * them off to a worker thread so there is lots of stack to use. For the other
2553 * btree types, just call directly to avoid the context switch overhead here.
2554 */
2555STATIC int /* error */
2556xfs_btree_split(
2557 struct xfs_btree_cur *cur,
2558 int level,
2559 union xfs_btree_ptr *ptrp,
2560 union xfs_btree_key *key,
2561 struct xfs_btree_cur **curp,
2562 int *stat) /* success/failure */
2563{
2564 struct xfs_btree_split_args args;
2565 DECLARE_COMPLETION_ONSTACK(done);
2566
2567 if (cur->bc_btnum != XFS_BTNUM_BMAP)
2568 return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
2569
2570 args.cur = cur;
2571 args.level = level;
2572 args.ptrp = ptrp;
2573 args.key = key;
2574 args.curp = curp;
2575 args.stat = stat;
2576 args.done = &done;
2577 args.kswapd = current_is_kswapd();
2578 INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
2579 queue_work(xfs_alloc_wq, &args.work);
2580 wait_for_completion(&done);
2581 destroy_work_on_stack(&args.work);
2582 return args.result;
2583}
2584
2585
2506/* 2586/*
2507 * Copy the old inode root contents into a real block and make the 2587 * Copy the old inode root contents into a real block and make the
2508 * broot point to it. 2588 * broot point to it.
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 6c5eb4c551e3..6d3ec2b6ee29 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -749,8 +749,7 @@ xfs_iomap_write_allocate(
749 * pointer that the caller gave to us. 749 * pointer that the caller gave to us.
750 */ 750 */
751 error = xfs_bmapi_write(tp, ip, map_start_fsb, 751 error = xfs_bmapi_write(tp, ip, map_start_fsb,
752 count_fsb, 752 count_fsb, 0,
753 XFS_BMAPI_STACK_SWITCH,
754 &first_block, 1, 753 &first_block, 1,
755 imap, &nimaps, &free_list); 754 imap, &nimaps, &free_list);
756 if (error) 755 if (error)
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
index c3453b11f563..7703fa6770ff 100644
--- a/fs/xfs/xfs_sb.c
+++ b/fs/xfs/xfs_sb.c
@@ -483,10 +483,16 @@ xfs_sb_quota_to_disk(
483 } 483 }
484 484
485 /* 485 /*
486 * GQUOTINO and PQUOTINO cannot be used together in versions 486 * GQUOTINO and PQUOTINO cannot be used together in versions of
487 * of superblock that do not have pquotino. from->sb_flags 487 * superblock that do not have pquotino. from->sb_flags tells us which
488 * tells us which quota is active and should be copied to 488 * quota is active and should be copied to disk. If neither are active,
489 * disk. 489 * make sure we write NULLFSINO to the sb_gquotino field as a quota
490 * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
491 * bit is set.
492 *
493 * Note that we don't need to handle the sb_uquotino or sb_pquotino here
494 * as they do not require any translation. Hence the main sb field loop
495 * will write them appropriately from the in-core superblock.
490 */ 496 */
491 if ((*fields & XFS_SB_GQUOTINO) && 497 if ((*fields & XFS_SB_GQUOTINO) &&
492 (from->sb_qflags & XFS_GQUOTA_ACCT)) 498 (from->sb_qflags & XFS_GQUOTA_ACCT))
@@ -494,6 +500,17 @@ xfs_sb_quota_to_disk(
494 else if ((*fields & XFS_SB_PQUOTINO) && 500 else if ((*fields & XFS_SB_PQUOTINO) &&
495 (from->sb_qflags & XFS_PQUOTA_ACCT)) 501 (from->sb_qflags & XFS_PQUOTA_ACCT))
496 to->sb_gquotino = cpu_to_be64(from->sb_pquotino); 502 to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
503 else {
504 /*
505 * We can't rely on just the fields being logged to tell us
506 * that it is safe to write NULLFSINO - we should only do that
507 * if quotas are not actually enabled. Hence only write
508 * NULLFSINO if both in-core quota inodes are NULL.
509 */
510 if (from->sb_gquotino == NULLFSINO &&
511 from->sb_pquotino == NULLFSINO)
512 to->sb_gquotino = cpu_to_be64(NULLFSINO);
513 }
497 514
498 *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO); 515 *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
499} 516}
diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h
index 002a2855c046..3d33794e4f3e 100644
--- a/include/dt-bindings/pinctrl/dra.h
+++ b/include/dt-bindings/pinctrl/dra.h
@@ -30,7 +30,8 @@
30#define MUX_MODE14 0xe 30#define MUX_MODE14 0xe
31#define MUX_MODE15 0xf 31#define MUX_MODE15 0xf
32 32
33#define PULL_ENA (1 << 16) 33#define PULL_ENA (0 << 16)
34#define PULL_DIS (1 << 16)
34#define PULL_UP (1 << 17) 35#define PULL_UP (1 << 17)
35#define INPUT_EN (1 << 18) 36#define INPUT_EN (1 << 18)
36#define SLEWCONTROL (1 << 19) 37#define SLEWCONTROL (1 << 19)
@@ -38,10 +39,10 @@
38#define WAKEUP_EVENT (1 << 25) 39#define WAKEUP_EVENT (1 << 25)
39 40
40/* Active pin states */ 41/* Active pin states */
41#define PIN_OUTPUT 0 42#define PIN_OUTPUT (0 | PULL_DIS)
42#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP) 43#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP)
43#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA) 44#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA)
44#define PIN_INPUT INPUT_EN 45#define PIN_INPUT (INPUT_EN | PULL_DIS)
45#define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL) 46#define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL)
46#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP) 47#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP)
47#define PIN_INPUT_PULLDOWN (PULL_ENA | INPUT_EN) 48#define PIN_INPUT_PULLDOWN (PULL_ENA | INPUT_EN)
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ec4112d257bc..8f8ae95c6e27 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -482,8 +482,8 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
482 *********************************************************************/ 482 *********************************************************************/
483 483
484/* Special Values of .frequency field */ 484/* Special Values of .frequency field */
485#define CPUFREQ_ENTRY_INVALID ~0 485#define CPUFREQ_ENTRY_INVALID ~0u
486#define CPUFREQ_TABLE_END ~1 486#define CPUFREQ_TABLE_END ~1u
487/* Special Values of .flags field */ 487/* Special Values of .flags field */
488#define CPUFREQ_BOOST_FREQ (1 << 0) 488#define CPUFREQ_BOOST_FREQ (1 << 0)
489 489
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 255cd5cc0754..a23c096b3080 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -80,6 +80,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
80bool isolate_huge_page(struct page *page, struct list_head *list); 80bool isolate_huge_page(struct page *page, struct list_head *list);
81void putback_active_hugepage(struct page *page); 81void putback_active_hugepage(struct page *page);
82bool is_hugepage_active(struct page *page); 82bool is_hugepage_active(struct page *page);
83void free_huge_page(struct page *page);
83 84
84#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 85#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
85pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); 86pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5ab4e3a76721..92abb497ab14 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -593,6 +593,7 @@ struct ata_host {
593 struct device *dev; 593 struct device *dev;
594 void __iomem * const *iomap; 594 void __iomem * const *iomap;
595 unsigned int n_ports; 595 unsigned int n_ports;
596 unsigned int n_tags; /* nr of NCQ tags */
596 void *private_data; 597 void *private_data;
597 struct ata_port_operations *ops; 598 struct ata_port_operations *ops;
598 unsigned long flags; 599 unsigned long flags;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index b12f4bbd064c..35b51e7af886 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -578,8 +578,6 @@ struct mlx4_cq {
578 u32 cons_index; 578 u32 cons_index;
579 579
580 u16 irq; 580 u16 irq;
581 bool irq_affinity_change;
582
583 __be32 *set_ci_db; 581 __be32 *set_ci_db;
584 __be32 *arm_db; 582 __be32 *arm_db;
585 int arm_sn; 583 int arm_sn;
@@ -1167,6 +1165,8 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
1167 int *vector); 1165 int *vector);
1168void mlx4_release_eq(struct mlx4_dev *dev, int vec); 1166void mlx4_release_eq(struct mlx4_dev *dev, int vec);
1169 1167
1168int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
1169
1170int mlx4_get_phys_port_id(struct mlx4_dev *dev); 1170int mlx4_get_phys_port_id(struct mlx4_dev *dev);
1171int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); 1171int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
1172int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); 1172int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 11692dea18aa..42aa9b9ecd5f 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -17,6 +17,7 @@
17#include <linux/lockdep.h> 17#include <linux/lockdep.h>
18#include <linux/atomic.h> 18#include <linux/atomic.h>
19#include <asm/processor.h> 19#include <asm/processor.h>
20#include <linux/osq_lock.h>
20 21
21/* 22/*
22 * Simple, straightforward mutexes with strict semantics: 23 * Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
46 * - detects multi-task circular deadlocks and prints out all affected 47 * - detects multi-task circular deadlocks and prints out all affected
47 * locks and tasks (and only those tasks) 48 * locks and tasks (and only those tasks)
48 */ 49 */
49struct optimistic_spin_queue;
50struct mutex { 50struct mutex {
51 /* 1: unlocked, 0: locked, negative: locked, possible waiters */ 51 /* 1: unlocked, 0: locked, negative: locked, possible waiters */
52 atomic_t count; 52 atomic_t count;
@@ -56,7 +56,7 @@ struct mutex {
56 struct task_struct *owner; 56 struct task_struct *owner;
57#endif 57#endif
58#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 58#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
59 struct optimistic_spin_queue *osq; /* Spinner MCS lock */ 59 struct optimistic_spin_queue osq; /* Spinner MCS lock */
60#endif 60#endif
61#ifdef CONFIG_DEBUG_MUTEXES 61#ifdef CONFIG_DEBUG_MUTEXES
62 const char *name; 62 const char *name;
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 05117899fcb4..0ff360d5b3b3 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -73,6 +73,8 @@ extern int early_init_dt_scan_root(unsigned long node, const char *uname,
73 int depth, void *data); 73 int depth, void *data);
74 74
75extern bool early_init_dt_scan(void *params); 75extern bool early_init_dt_scan(void *params);
76extern bool early_init_dt_verify(void *params);
77extern void early_init_dt_scan_nodes(void);
76 78
77extern const char *of_flat_dt_get_machine_name(void); 79extern const char *of_flat_dt_get_machine_name(void);
78extern const void *of_flat_dt_match_machine(const void *default_match, 80extern const void *of_flat_dt_match_machine(const void *default_match,
@@ -84,6 +86,7 @@ extern void unflatten_and_copy_device_tree(void);
84extern void early_init_devtree(void *); 86extern void early_init_devtree(void *);
85extern void early_get_first_memblock_info(void *, phys_addr_t *); 87extern void early_get_first_memblock_info(void *, phys_addr_t *);
86extern u64 fdt_translate_address(const void *blob, int node_offset); 88extern u64 fdt_translate_address(const void *blob, int node_offset);
89extern void of_fdt_limit_memory(int limit);
87#else /* CONFIG_OF_FLATTREE */ 90#else /* CONFIG_OF_FLATTREE */
88static inline void early_init_fdt_scan_reserved_mem(void) {} 91static inline void early_init_fdt_scan_reserved_mem(void) {}
89static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } 92static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index a70c9493d55a..d449018d0726 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -25,9 +25,6 @@ struct phy_device *of_phy_attach(struct net_device *dev,
25 25
26extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); 26extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
27 27
28extern void of_mdiobus_link_phydev(struct mii_bus *mdio,
29 struct phy_device *phydev);
30
31#else /* CONFIG_OF */ 28#else /* CONFIG_OF */
32static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) 29static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
33{ 30{
@@ -63,11 +60,6 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
63{ 60{
64 return NULL; 61 return NULL;
65} 62}
66
67static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
68 struct phy_device *phydev)
69{
70}
71#endif /* CONFIG_OF */ 63#endif /* CONFIG_OF */
72 64
73#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY) 65#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
new file mode 100644
index 000000000000..90230d5811c5
--- /dev/null
+++ b/include/linux/osq_lock.h
@@ -0,0 +1,27 @@
1#ifndef __LINUX_OSQ_LOCK_H
2#define __LINUX_OSQ_LOCK_H
3
4/*
5 * An MCS like lock especially tailored for optimistic spinning for sleeping
6 * lock implementations (mutex, rwsem, etc).
7 */
8
9#define OSQ_UNLOCKED_VAL (0)
10
11struct optimistic_spin_queue {
12 /*
13 * Stores an encoded value of the CPU # of the tail node in the queue.
14 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
15 */
16 atomic_t tail;
17};
18
19/* Init macro and function. */
20#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
21
22static inline void osq_lock_init(struct optimistic_spin_queue *lock)
23{
24 atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
25}
26
27#endif
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0a97b583ee8d..e1474ae18c88 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -399,6 +399,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
399} 399}
400 400
401/* 401/*
402 * Get the offset in PAGE_SIZE.
403 * (TODO: hugepage should have ->index in PAGE_SIZE)
404 */
405static inline pgoff_t page_to_pgoff(struct page *page)
406{
407 if (unlikely(PageHeadHuge(page)))
408 return page->index << compound_order(page);
409 else
410 return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
411}
412
413/*
402 * Return byte-offset into filesystem object for page. 414 * Return byte-offset into filesystem object for page.
403 */ 415 */
404static inline loff_t page_offset(struct page *page) 416static inline loff_t page_offset(struct page *page)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5a75d19aa661..6a94cc8b1ca0 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,7 +44,6 @@
44#include <linux/debugobjects.h> 44#include <linux/debugobjects.h>
45#include <linux/bug.h> 45#include <linux/bug.h>
46#include <linux/compiler.h> 46#include <linux/compiler.h>
47#include <linux/percpu.h>
48#include <asm/barrier.h> 47#include <asm/barrier.h>
49 48
50extern int rcu_expedited; /* for sysctl */ 49extern int rcu_expedited; /* for sysctl */
@@ -300,41 +299,6 @@ bool __rcu_is_watching(void);
300#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ 299#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
301 300
302/* 301/*
303 * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
304 */
305
306#define RCU_COND_RESCHED_LIM 256 /* ms vs. 100s of ms. */
307DECLARE_PER_CPU(int, rcu_cond_resched_count);
308void rcu_resched(void);
309
310/*
311 * Is it time to report RCU quiescent states?
312 *
313 * Note unsynchronized access to rcu_cond_resched_count. Yes, we might
314 * increment some random CPU's count, and possibly also load the result from
315 * yet another CPU's count. We might even clobber some other CPU's attempt
316 * to zero its counter. This is all OK because the goal is not precision,
317 * but rather reasonable amortization of rcu_note_context_switch() overhead
318 * and extremely high probability of avoiding RCU CPU stall warnings.
319 * Note that this function has to be preempted in just the wrong place,
320 * many thousands of times in a row, for anything bad to happen.
321 */
322static inline bool rcu_should_resched(void)
323{
324 return raw_cpu_inc_return(rcu_cond_resched_count) >=
325 RCU_COND_RESCHED_LIM;
326}
327
328/*
329 * Report quiscent states to RCU if it is time to do so.
330 */
331static inline void rcu_cond_resched(void)
332{
333 if (unlikely(rcu_should_resched()))
334 rcu_resched();
335}
336
337/*
338 * Infrastructure to implement the synchronize_() primitives in 302 * Infrastructure to implement the synchronize_() primitives in
339 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. 303 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
340 */ 304 */
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf);
358 * initialization. 322 * initialization.
359 */ 323 */
360#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 324#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
325void init_rcu_head(struct rcu_head *head);
326void destroy_rcu_head(struct rcu_head *head);
361void init_rcu_head_on_stack(struct rcu_head *head); 327void init_rcu_head_on_stack(struct rcu_head *head);
362void destroy_rcu_head_on_stack(struct rcu_head *head); 328void destroy_rcu_head_on_stack(struct rcu_head *head);
363#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 329#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
330static inline void init_rcu_head(struct rcu_head *head)
331{
332}
333
334static inline void destroy_rcu_head(struct rcu_head *head)
335{
336}
337
364static inline void init_rcu_head_on_stack(struct rcu_head *head) 338static inline void init_rcu_head_on_stack(struct rcu_head *head)
365{ 339{
366} 340}
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index d5b13bc07a0b..561e8615528d 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -15,13 +15,13 @@
15#ifdef __KERNEL__ 15#ifdef __KERNEL__
16/* 16/*
17 * the rw-semaphore definition 17 * the rw-semaphore definition
18 * - if activity is 0 then there are no active readers or writers 18 * - if count is 0 then there are no active readers or writers
19 * - if activity is +ve then that is the number of active readers 19 * - if count is +ve then that is the number of active readers
20 * - if activity is -1 then there is one active writer 20 * - if count is -1 then there is one active writer
21 * - if wait_list is not empty, then there are processes waiting for the semaphore 21 * - if wait_list is not empty, then there are processes waiting for the semaphore
22 */ 22 */
23struct rw_semaphore { 23struct rw_semaphore {
24 __s32 activity; 24 __s32 count;
25 raw_spinlock_t wait_lock; 25 raw_spinlock_t wait_lock;
26 struct list_head wait_list; 26 struct list_head wait_list;
27#ifdef CONFIG_DEBUG_LOCK_ALLOC 27#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8d79708146aa..035d3c57fc8a 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -13,10 +13,11 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16
17#include <linux/atomic.h> 16#include <linux/atomic.h>
17#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
18#include <linux/osq_lock.h>
19#endif
18 20
19struct optimistic_spin_queue;
20struct rw_semaphore; 21struct rw_semaphore;
21 22
22#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK 23#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -25,15 +26,15 @@ struct rw_semaphore;
25/* All arch specific implementations share the same struct */ 26/* All arch specific implementations share the same struct */
26struct rw_semaphore { 27struct rw_semaphore {
27 long count; 28 long count;
28 raw_spinlock_t wait_lock;
29 struct list_head wait_list; 29 struct list_head wait_list;
30#ifdef CONFIG_SMP 30 raw_spinlock_t wait_lock;
31#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
32 struct optimistic_spin_queue osq; /* spinner MCS lock */
31 /* 33 /*
32 * Write owner. Used as a speculative check to see 34 * Write owner. Used as a speculative check to see
33 * if the owner is running on the cpu. 35 * if the owner is running on the cpu.
34 */ 36 */
35 struct task_struct *owner; 37 struct task_struct *owner;
36 struct optimistic_spin_queue *osq; /* spinner MCS lock */
37#endif 38#endif
38#ifdef CONFIG_DEBUG_LOCK_ALLOC 39#ifdef CONFIG_DEBUG_LOCK_ALLOC
39 struct lockdep_map dep_map; 40 struct lockdep_map dep_map;
@@ -64,22 +65,19 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
64# define __RWSEM_DEP_MAP_INIT(lockname) 65# define __RWSEM_DEP_MAP_INIT(lockname)
65#endif 66#endif
66 67
67#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK) 68#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
68#define __RWSEM_INITIALIZER(name) \ 69#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
69 { RWSEM_UNLOCKED_VALUE, \
70 __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
71 LIST_HEAD_INIT((name).wait_list), \
72 NULL, /* owner */ \
73 NULL /* mcs lock */ \
74 __RWSEM_DEP_MAP_INIT(name) }
75#else 70#else
76#define __RWSEM_INITIALIZER(name) \ 71#define __RWSEM_OPT_INIT(lockname)
77 { RWSEM_UNLOCKED_VALUE, \
78 __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
79 LIST_HEAD_INIT((name).wait_list) \
80 __RWSEM_DEP_MAP_INIT(name) }
81#endif 72#endif
82 73
74#define __RWSEM_INITIALIZER(name) \
75 { .count = RWSEM_UNLOCKED_VALUE, \
76 .wait_list = LIST_HEAD_INIT((name).wait_list), \
77 .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
78 __RWSEM_OPT_INIT(name) \
79 __RWSEM_DEP_MAP_INIT(name) }
80
83#define DECLARE_RWSEM(name) \ 81#define DECLARE_RWSEM(name) \
84 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 82 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
85 83
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0fd19055bb64..45cec6b70eaf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -872,21 +872,21 @@ enum cpu_idle_type {
872#define SD_NUMA 0x4000 /* cross-node balancing */ 872#define SD_NUMA 0x4000 /* cross-node balancing */
873 873
874#ifdef CONFIG_SCHED_SMT 874#ifdef CONFIG_SCHED_SMT
875static inline const int cpu_smt_flags(void) 875static inline int cpu_smt_flags(void)
876{ 876{
877 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; 877 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
878} 878}
879#endif 879#endif
880 880
881#ifdef CONFIG_SCHED_MC 881#ifdef CONFIG_SCHED_MC
882static inline const int cpu_core_flags(void) 882static inline int cpu_core_flags(void)
883{ 883{
884 return SD_SHARE_PKG_RESOURCES; 884 return SD_SHARE_PKG_RESOURCES;
885} 885}
886#endif 886#endif
887 887
888#ifdef CONFIG_NUMA 888#ifdef CONFIG_NUMA
889static inline const int cpu_numa_flags(void) 889static inline int cpu_numa_flags(void)
890{ 890{
891 return SD_NUMA; 891 return SD_NUMA;
892} 892}
@@ -999,7 +999,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
999bool cpus_share_cache(int this_cpu, int that_cpu); 999bool cpus_share_cache(int this_cpu, int that_cpu);
1000 1000
1001typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); 1001typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1002typedef const int (*sched_domain_flags_f)(void); 1002typedef int (*sched_domain_flags_f)(void);
1003 1003
1004#define SDTL_OVERLAP 0x01 1004#define SDTL_OVERLAP 0x01
1005 1005
diff --git a/include/net/ip.h b/include/net/ip.h
index 0e795df05ec9..7596eb22e1ce 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -309,16 +309,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
309 } 309 }
310} 310}
311 311
312#define IP_IDENTS_SZ 2048u 312u32 ip_idents_reserve(u32 hash, int segs);
313extern atomic_t *ip_idents;
314
315static inline u32 ip_idents_reserve(u32 hash, int segs)
316{
317 atomic_t *id_ptr = ip_idents + hash % IP_IDENTS_SZ;
318
319 return atomic_add_return(segs, id_ptr) - segs;
320}
321
322void __ip_select_ident(struct iphdr *iph, int segs); 313void __ip_select_ident(struct iphdr *iph, int segs);
323 314
324static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs) 315static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 7277caf3743d..47f425464f84 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -203,7 +203,6 @@ struct neigh_table {
203 void (*proxy_redo)(struct sk_buff *skb); 203 void (*proxy_redo)(struct sk_buff *skb);
204 char *id; 204 char *id;
205 struct neigh_parms parms; 205 struct neigh_parms parms;
206 /* HACK. gc_* should follow parms without a gap! */
207 int gc_interval; 206 int gc_interval;
208 int gc_thresh1; 207 int gc_thresh1;
209 int gc_thresh2; 208 int gc_thresh2;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 713b0b88bd5a..c4d86198d3d6 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -6,6 +6,7 @@
6#include <linux/netfilter/nfnetlink.h> 6#include <linux/netfilter/nfnetlink.h>
7#include <linux/netfilter/x_tables.h> 7#include <linux/netfilter/x_tables.h>
8#include <linux/netfilter/nf_tables.h> 8#include <linux/netfilter/nf_tables.h>
9#include <linux/u64_stats_sync.h>
9#include <net/netlink.h> 10#include <net/netlink.h>
10 11
11#define NFT_JUMP_STACK_SIZE 16 12#define NFT_JUMP_STACK_SIZE 16
@@ -528,8 +529,9 @@ enum nft_chain_type {
528}; 529};
529 530
530struct nft_stats { 531struct nft_stats {
531 u64 bytes; 532 u64 bytes;
532 u64 pkts; 533 u64 pkts;
534 struct u64_stats_sync syncp;
533}; 535};
534 536
535#define NFT_HOOK_OPS_MAX 2 537#define NFT_HOOK_OPS_MAX 2
diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h
index 079030c853d8..e2070960bac0 100644
--- a/include/net/netns/ieee802154_6lowpan.h
+++ b/include/net/netns/ieee802154_6lowpan.h
@@ -16,7 +16,7 @@ struct netns_sysctl_lowpan {
16struct netns_ieee802154_lowpan { 16struct netns_ieee802154_lowpan {
17 struct netns_sysctl_lowpan sysctl; 17 struct netns_sysctl_lowpan sysctl;
18 struct netns_frags frags; 18 struct netns_frags frags;
19 u16 max_dsize; 19 int max_dsize;
20}; 20};
21 21
22#endif 22#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 26a394cb91a8..eee608b12cc9 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -13,8 +13,8 @@ struct netns_nftables {
13 struct nft_af_info *inet; 13 struct nft_af_info *inet;
14 struct nft_af_info *arp; 14 struct nft_af_info *arp;
15 struct nft_af_info *bridge; 15 struct nft_af_info *bridge;
16 unsigned int base_seq;
16 u8 gencursor; 17 u8 gencursor;
17 u8 genctr;
18}; 18};
19 19
20#endif 20#endif
diff --git a/include/net/sock.h b/include/net/sock.h
index 173cae485de1..156350745700 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1768,9 +1768,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
1768static inline void 1768static inline void
1769sk_dst_set(struct sock *sk, struct dst_entry *dst) 1769sk_dst_set(struct sock *sk, struct dst_entry *dst)
1770{ 1770{
1771 spin_lock(&sk->sk_dst_lock); 1771 struct dst_entry *old_dst;
1772 __sk_dst_set(sk, dst); 1772
1773 spin_unlock(&sk->sk_dst_lock); 1773 sk_tx_queue_clear(sk);
1774 old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
1775 dst_release(old_dst);
1774} 1776}
1775 1777
1776static inline void 1778static inline void
@@ -1782,9 +1784,7 @@ __sk_dst_reset(struct sock *sk)
1782static inline void 1784static inline void
1783sk_dst_reset(struct sock *sk) 1785sk_dst_reset(struct sock *sk)
1784{ 1786{
1785 spin_lock(&sk->sk_dst_lock); 1787 sk_dst_set(sk, NULL);
1786 __sk_dst_reset(sk);
1787 spin_unlock(&sk->sk_dst_lock);
1788} 1788}
1789 1789
1790struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); 1790struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 40b5ca8a1b1f..25084a052a1e 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -101,6 +101,7 @@
101 * - add FATTR_CTIME 101 * - add FATTR_CTIME
102 * - add ctime and ctimensec to fuse_setattr_in 102 * - add ctime and ctimensec to fuse_setattr_in
103 * - add FUSE_RENAME2 request 103 * - add FUSE_RENAME2 request
104 * - add FUSE_NO_OPEN_SUPPORT flag
104 */ 105 */
105 106
106#ifndef _LINUX_FUSE_H 107#ifndef _LINUX_FUSE_H
@@ -229,6 +230,7 @@ struct fuse_file_lock {
229 * FUSE_READDIRPLUS_AUTO: adaptive readdirplus 230 * FUSE_READDIRPLUS_AUTO: adaptive readdirplus
230 * FUSE_ASYNC_DIO: asynchronous direct I/O submission 231 * FUSE_ASYNC_DIO: asynchronous direct I/O submission
231 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes 232 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
233 * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
232 */ 234 */
233#define FUSE_ASYNC_READ (1 << 0) 235#define FUSE_ASYNC_READ (1 << 0)
234#define FUSE_POSIX_LOCKS (1 << 1) 236#define FUSE_POSIX_LOCKS (1 << 1)
@@ -247,6 +249,7 @@ struct fuse_file_lock {
247#define FUSE_READDIRPLUS_AUTO (1 << 14) 249#define FUSE_READDIRPLUS_AUTO (1 << 14)
248#define FUSE_ASYNC_DIO (1 << 15) 250#define FUSE_ASYNC_DIO (1 << 15)
249#define FUSE_WRITEBACK_CACHE (1 << 16) 251#define FUSE_WRITEBACK_CACHE (1 << 16)
252#define FUSE_NO_OPEN_SUPPORT (1 << 17)
250 253
251/** 254/**
252 * CUSE INIT request/reply flags 255 * CUSE INIT request/reply flags
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index a5af2a26d94f..5c1aba154b64 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -170,6 +170,7 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
170 unmap->dev_bus_addr = 0; 170 unmap->dev_bus_addr = 0;
171} 171}
172 172
173int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
173int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes, 174int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
174 unsigned long max_nr_gframes, 175 unsigned long max_nr_gframes,
175 void **__shared); 176 void **__shared);
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 35536d9c0964..76768ee812b2 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -220,9 +220,16 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
220 220
221endif 221endif
222 222
223config ARCH_SUPPORTS_ATOMIC_RMW
224 bool
225
223config MUTEX_SPIN_ON_OWNER 226config MUTEX_SPIN_ON_OWNER
224 def_bool y 227 def_bool y
225 depends on SMP && !DEBUG_MUTEXES 228 depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
229
230config RWSEM_SPIN_ON_OWNER
231 def_bool y
232 depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
226 233
227config ARCH_USE_QUEUE_RWLOCK 234config ARCH_USE_QUEUE_RWLOCK
228 bool 235 bool
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a33d9a2bcbd7..6b17ac1b0c2a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2320,7 +2320,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2320 next_parent = rcu_dereference(next_ctx->parent_ctx); 2320 next_parent = rcu_dereference(next_ctx->parent_ctx);
2321 2321
2322 /* If neither context have a parent context; they cannot be clones. */ 2322 /* If neither context have a parent context; they cannot be clones. */
2323 if (!parent && !next_parent) 2323 if (!parent || !next_parent)
2324 goto unlock; 2324 goto unlock;
2325 2325
2326 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { 2326 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
@@ -7458,7 +7458,19 @@ __perf_event_exit_task(struct perf_event *child_event,
7458 struct perf_event_context *child_ctx, 7458 struct perf_event_context *child_ctx,
7459 struct task_struct *child) 7459 struct task_struct *child)
7460{ 7460{
7461 perf_remove_from_context(child_event, true); 7461 /*
7462 * Do not destroy the 'original' grouping; because of the context
7463 * switch optimization the original events could've ended up in a
7464 * random child task.
7465 *
7466 * If we were to destroy the original group, all group related
7467 * operations would cease to function properly after this random
7468 * child dies.
7469 *
7470 * Do destroy all inherited groups, we don't care about those
7471 * and being thorough is better.
7472 */
7473 perf_remove_from_context(child_event, !!child_event->parent);
7462 7474
7463 /* 7475 /*
7464 * It can happen that the parent exits first, and has events 7476 * It can happen that the parent exits first, and has events
@@ -7474,7 +7486,7 @@ __perf_event_exit_task(struct perf_event *child_event,
7474static void perf_event_exit_task_context(struct task_struct *child, int ctxn) 7486static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7475{ 7487{
7476 struct perf_event *child_event, *next; 7488 struct perf_event *child_event, *next;
7477 struct perf_event_context *child_ctx; 7489 struct perf_event_context *child_ctx, *parent_ctx;
7478 unsigned long flags; 7490 unsigned long flags;
7479 7491
7480 if (likely(!child->perf_event_ctxp[ctxn])) { 7492 if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7499,6 +7511,15 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7499 raw_spin_lock(&child_ctx->lock); 7511 raw_spin_lock(&child_ctx->lock);
7500 task_ctx_sched_out(child_ctx); 7512 task_ctx_sched_out(child_ctx);
7501 child->perf_event_ctxp[ctxn] = NULL; 7513 child->perf_event_ctxp[ctxn] = NULL;
7514
7515 /*
7516 * In order to avoid freeing: child_ctx->parent_ctx->task
7517 * under perf_event_context::lock, grab another reference.
7518 */
7519 parent_ctx = child_ctx->parent_ctx;
7520 if (parent_ctx)
7521 get_ctx(parent_ctx);
7522
7502 /* 7523 /*
7503 * If this context is a clone; unclone it so it can't get 7524 * If this context is a clone; unclone it so it can't get
7504 * swapped to another process while we're removing all 7525 * swapped to another process while we're removing all
@@ -7509,6 +7530,13 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7509 raw_spin_unlock_irqrestore(&child_ctx->lock, flags); 7530 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
7510 7531
7511 /* 7532 /*
7533 * Now that we no longer hold perf_event_context::lock, drop
7534 * our extra child_ctx->parent_ctx reference.
7535 */
7536 if (parent_ctx)
7537 put_ctx(parent_ctx);
7538
7539 /*
7512 * Report the task dead after unscheduling the events so that we 7540 * Report the task dead after unscheduling the events so that we
7513 * won't get any samples after PERF_RECORD_EXIT. We can however still 7541 * won't get any samples after PERF_RECORD_EXIT. We can however still
7514 * get a few PERF_RECORD_READ events. 7542 * get a few PERF_RECORD_READ events.
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 369f41a94124..4b8f0c925884 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -33,6 +33,7 @@
33#include <linux/swap.h> 33#include <linux/swap.h>
34#include <linux/syscore_ops.h> 34#include <linux/syscore_ops.h>
35#include <linux/compiler.h> 35#include <linux/compiler.h>
36#include <linux/hugetlb.h>
36 37
37#include <asm/page.h> 38#include <asm/page.h>
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
@@ -1619,6 +1620,9 @@ static int __init crash_save_vmcoreinfo_init(void)
1619#endif 1620#endif
1620 VMCOREINFO_NUMBER(PG_head_mask); 1621 VMCOREINFO_NUMBER(PG_head_mask);
1621 VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE); 1622 VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
1623#ifdef CONFIG_HUGETLBFS
1624 VMCOREINFO_SYMBOL(free_huge_page);
1625#endif
1622 1626
1623 arch_crash_save_vmcoreinfo(); 1627 arch_crash_save_vmcoreinfo();
1624 update_vmcoreinfo_note(); 1628 update_vmcoreinfo_note();
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3214289df5a7..734e9a7d280b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2037,19 +2037,23 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
2037{ 2037{
2038 unsigned long *iter; 2038 unsigned long *iter;
2039 struct kprobe_blacklist_entry *ent; 2039 struct kprobe_blacklist_entry *ent;
2040 unsigned long offset = 0, size = 0; 2040 unsigned long entry, offset = 0, size = 0;
2041 2041
2042 for (iter = start; iter < end; iter++) { 2042 for (iter = start; iter < end; iter++) {
2043 if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) { 2043 entry = arch_deref_entry_point((void *)*iter);
2044 pr_err("Failed to find blacklist %p\n", (void *)*iter); 2044
2045 if (!kernel_text_address(entry) ||
2046 !kallsyms_lookup_size_offset(entry, &size, &offset)) {
2047 pr_err("Failed to find blacklist at %p\n",
2048 (void *)entry);
2045 continue; 2049 continue;
2046 } 2050 }
2047 2051
2048 ent = kmalloc(sizeof(*ent), GFP_KERNEL); 2052 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2049 if (!ent) 2053 if (!ent)
2050 return -ENOMEM; 2054 return -ENOMEM;
2051 ent->start_addr = *iter; 2055 ent->start_addr = entry;
2052 ent->end_addr = *iter + size; 2056 ent->end_addr = entry + size;
2053 INIT_LIST_HEAD(&ent->list); 2057 INIT_LIST_HEAD(&ent->list);
2054 list_add_tail(&ent->list, &kprobe_blacklist); 2058 list_add_tail(&ent->list, &kprobe_blacklist);
2055 } 2059 }
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
index 838dc9e00669..be9ee1559fca 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/mcs_spinlock.c
@@ -14,21 +14,47 @@
14 * called from interrupt context and we have preemption disabled while 14 * called from interrupt context and we have preemption disabled while
15 * spinning. 15 * spinning.
16 */ 16 */
17static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node); 17static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
18
19/*
20 * We use the value 0 to represent "no CPU", thus the encoded value
21 * will be the CPU number incremented by 1.
22 */
23static inline int encode_cpu(int cpu_nr)
24{
25 return cpu_nr + 1;
26}
27
28static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
29{
30 int cpu_nr = encoded_cpu_val - 1;
31
32 return per_cpu_ptr(&osq_node, cpu_nr);
33}
18 34
19/* 35/*
20 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes. 36 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
21 * Can return NULL in case we were the last queued and we updated @lock instead. 37 * Can return NULL in case we were the last queued and we updated @lock instead.
22 */ 38 */
23static inline struct optimistic_spin_queue * 39static inline struct optimistic_spin_node *
24osq_wait_next(struct optimistic_spin_queue **lock, 40osq_wait_next(struct optimistic_spin_queue *lock,
25 struct optimistic_spin_queue *node, 41 struct optimistic_spin_node *node,
26 struct optimistic_spin_queue *prev) 42 struct optimistic_spin_node *prev)
27{ 43{
28 struct optimistic_spin_queue *next = NULL; 44 struct optimistic_spin_node *next = NULL;
45 int curr = encode_cpu(smp_processor_id());
46 int old;
47
48 /*
49 * If there is a prev node in queue, then the 'old' value will be
50 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
51 * we're currently last in queue, then the queue will then become empty.
52 */
53 old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
29 54
30 for (;;) { 55 for (;;) {
31 if (*lock == node && cmpxchg(lock, node, prev) == node) { 56 if (atomic_read(&lock->tail) == curr &&
57 atomic_cmpxchg(&lock->tail, curr, old) == curr) {
32 /* 58 /*
33 * We were the last queued, we moved @lock back. @prev 59 * We were the last queued, we moved @lock back. @prev
34 * will now observe @lock and will complete its 60 * will now observe @lock and will complete its
@@ -59,18 +85,23 @@ osq_wait_next(struct optimistic_spin_queue **lock,
59 return next; 85 return next;
60} 86}
61 87
62bool osq_lock(struct optimistic_spin_queue **lock) 88bool osq_lock(struct optimistic_spin_queue *lock)
63{ 89{
64 struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); 90 struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
65 struct optimistic_spin_queue *prev, *next; 91 struct optimistic_spin_node *prev, *next;
92 int curr = encode_cpu(smp_processor_id());
93 int old;
66 94
67 node->locked = 0; 95 node->locked = 0;
68 node->next = NULL; 96 node->next = NULL;
97 node->cpu = curr;
69 98
70 node->prev = prev = xchg(lock, node); 99 old = atomic_xchg(&lock->tail, curr);
71 if (likely(prev == NULL)) 100 if (old == OSQ_UNLOCKED_VAL)
72 return true; 101 return true;
73 102
103 prev = decode_cpu(old);
104 node->prev = prev;
74 ACCESS_ONCE(prev->next) = node; 105 ACCESS_ONCE(prev->next) = node;
75 106
76 /* 107 /*
@@ -149,20 +180,21 @@ unqueue:
149 return false; 180 return false;
150} 181}
151 182
152void osq_unlock(struct optimistic_spin_queue **lock) 183void osq_unlock(struct optimistic_spin_queue *lock)
153{ 184{
154 struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); 185 struct optimistic_spin_node *node, *next;
155 struct optimistic_spin_queue *next; 186 int curr = encode_cpu(smp_processor_id());
156 187
157 /* 188 /*
158 * Fast path for the uncontended case. 189 * Fast path for the uncontended case.
159 */ 190 */
160 if (likely(cmpxchg(lock, node, NULL) == node)) 191 if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
161 return; 192 return;
162 193
163 /* 194 /*
164 * Second most likely case. 195 * Second most likely case.
165 */ 196 */
197 node = this_cpu_ptr(&osq_node);
166 next = xchg(&node->next, NULL); 198 next = xchg(&node->next, NULL);
167 if (next) { 199 if (next) {
168 ACCESS_ONCE(next->locked) = 1; 200 ACCESS_ONCE(next->locked) = 1;
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index a2dbac4aca6b..74356dc0ce29 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -118,12 +118,13 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
118 * mutex_lock()/rwsem_down_{read,write}() etc. 118 * mutex_lock()/rwsem_down_{read,write}() etc.
119 */ 119 */
120 120
121struct optimistic_spin_queue { 121struct optimistic_spin_node {
122 struct optimistic_spin_queue *next, *prev; 122 struct optimistic_spin_node *next, *prev;
123 int locked; /* 1 if lock acquired */ 123 int locked; /* 1 if lock acquired */
124 int cpu; /* encoded CPU # value */
124}; 125};
125 126
126extern bool osq_lock(struct optimistic_spin_queue **lock); 127extern bool osq_lock(struct optimistic_spin_queue *lock);
127extern void osq_unlock(struct optimistic_spin_queue **lock); 128extern void osq_unlock(struct optimistic_spin_queue *lock);
128 129
129#endif /* __LINUX_MCS_SPINLOCK_H */ 130#endif /* __LINUX_MCS_SPINLOCK_H */
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index bc73d33c6760..acca2c1a3c5e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -60,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
60 INIT_LIST_HEAD(&lock->wait_list); 60 INIT_LIST_HEAD(&lock->wait_list);
61 mutex_clear_owner(lock); 61 mutex_clear_owner(lock);
62#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 62#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
63 lock->osq = NULL; 63 osq_lock_init(&lock->osq);
64#endif 64#endif
65 65
66 debug_mutex_init(lock, name, key); 66 debug_mutex_init(lock, name, key);
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 9be8a9144978..2c93571162cb 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -26,7 +26,7 @@ int rwsem_is_locked(struct rw_semaphore *sem)
26 unsigned long flags; 26 unsigned long flags;
27 27
28 if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { 28 if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
29 ret = (sem->activity != 0); 29 ret = (sem->count != 0);
30 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 30 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
31 } 31 }
32 return ret; 32 return ret;
@@ -46,7 +46,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
46 debug_check_no_locks_freed((void *)sem, sizeof(*sem)); 46 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
47 lockdep_init_map(&sem->dep_map, name, key, 0); 47 lockdep_init_map(&sem->dep_map, name, key, 0);
48#endif 48#endif
49 sem->activity = 0; 49 sem->count = 0;
50 raw_spin_lock_init(&sem->wait_lock); 50 raw_spin_lock_init(&sem->wait_lock);
51 INIT_LIST_HEAD(&sem->wait_list); 51 INIT_LIST_HEAD(&sem->wait_list);
52} 52}
@@ -95,7 +95,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
95 waiter = list_entry(next, struct rwsem_waiter, list); 95 waiter = list_entry(next, struct rwsem_waiter, list);
96 } while (waiter->type != RWSEM_WAITING_FOR_WRITE); 96 } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
97 97
98 sem->activity += woken; 98 sem->count += woken;
99 99
100 out: 100 out:
101 return sem; 101 return sem;
@@ -126,9 +126,9 @@ void __sched __down_read(struct rw_semaphore *sem)
126 126
127 raw_spin_lock_irqsave(&sem->wait_lock, flags); 127 raw_spin_lock_irqsave(&sem->wait_lock, flags);
128 128
129 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 129 if (sem->count >= 0 && list_empty(&sem->wait_list)) {
130 /* granted */ 130 /* granted */
131 sem->activity++; 131 sem->count++;
132 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 132 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
133 goto out; 133 goto out;
134 } 134 }
@@ -170,9 +170,9 @@ int __down_read_trylock(struct rw_semaphore *sem)
170 170
171 raw_spin_lock_irqsave(&sem->wait_lock, flags); 171 raw_spin_lock_irqsave(&sem->wait_lock, flags);
172 172
173 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 173 if (sem->count >= 0 && list_empty(&sem->wait_list)) {
174 /* granted */ 174 /* granted */
175 sem->activity++; 175 sem->count++;
176 ret = 1; 176 ret = 1;
177 } 177 }
178 178
@@ -206,7 +206,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
206 * itself into sleep and waiting for system woke it or someone 206 * itself into sleep and waiting for system woke it or someone
207 * else in the head of the wait list up. 207 * else in the head of the wait list up.
208 */ 208 */
209 if (sem->activity == 0) 209 if (sem->count == 0)
210 break; 210 break;
211 set_task_state(tsk, TASK_UNINTERRUPTIBLE); 211 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
212 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 212 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -214,7 +214,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
214 raw_spin_lock_irqsave(&sem->wait_lock, flags); 214 raw_spin_lock_irqsave(&sem->wait_lock, flags);
215 } 215 }
216 /* got the lock */ 216 /* got the lock */
217 sem->activity = -1; 217 sem->count = -1;
218 list_del(&waiter.list); 218 list_del(&waiter.list);
219 219
220 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 220 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -235,9 +235,9 @@ int __down_write_trylock(struct rw_semaphore *sem)
235 235
236 raw_spin_lock_irqsave(&sem->wait_lock, flags); 236 raw_spin_lock_irqsave(&sem->wait_lock, flags);
237 237
238 if (sem->activity == 0) { 238 if (sem->count == 0) {
239 /* got the lock */ 239 /* got the lock */
240 sem->activity = -1; 240 sem->count = -1;
241 ret = 1; 241 ret = 1;
242 } 242 }
243 243
@@ -255,7 +255,7 @@ void __up_read(struct rw_semaphore *sem)
255 255
256 raw_spin_lock_irqsave(&sem->wait_lock, flags); 256 raw_spin_lock_irqsave(&sem->wait_lock, flags);
257 257
258 if (--sem->activity == 0 && !list_empty(&sem->wait_list)) 258 if (--sem->count == 0 && !list_empty(&sem->wait_list))
259 sem = __rwsem_wake_one_writer(sem); 259 sem = __rwsem_wake_one_writer(sem);
260 260
261 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 261 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -270,7 +270,7 @@ void __up_write(struct rw_semaphore *sem)
270 270
271 raw_spin_lock_irqsave(&sem->wait_lock, flags); 271 raw_spin_lock_irqsave(&sem->wait_lock, flags);
272 272
273 sem->activity = 0; 273 sem->count = 0;
274 if (!list_empty(&sem->wait_list)) 274 if (!list_empty(&sem->wait_list))
275 sem = __rwsem_do_wake(sem, 1); 275 sem = __rwsem_do_wake(sem, 1);
276 276
@@ -287,7 +287,7 @@ void __downgrade_write(struct rw_semaphore *sem)
287 287
288 raw_spin_lock_irqsave(&sem->wait_lock, flags); 288 raw_spin_lock_irqsave(&sem->wait_lock, flags);
289 289
290 sem->activity = 1; 290 sem->count = 1;
291 if (!list_empty(&sem->wait_list)) 291 if (!list_empty(&sem->wait_list))
292 sem = __rwsem_do_wake(sem, 0); 292 sem = __rwsem_do_wake(sem, 0);
293 293
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index dacc32142fcc..a2391ac135c8 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -82,9 +82,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
82 sem->count = RWSEM_UNLOCKED_VALUE; 82 sem->count = RWSEM_UNLOCKED_VALUE;
83 raw_spin_lock_init(&sem->wait_lock); 83 raw_spin_lock_init(&sem->wait_lock);
84 INIT_LIST_HEAD(&sem->wait_list); 84 INIT_LIST_HEAD(&sem->wait_list);
85#ifdef CONFIG_SMP 85#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
86 sem->owner = NULL; 86 sem->owner = NULL;
87 sem->osq = NULL; 87 osq_lock_init(&sem->osq);
88#endif 88#endif
89} 89}
90 90
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
262 return false; 262 return false;
263} 263}
264 264
265#ifdef CONFIG_SMP 265#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
266/* 266/*
267 * Try to acquire write lock before the writer has been put on wait queue. 267 * Try to acquire write lock before the writer has been put on wait queue.
268 */ 268 */
@@ -285,10 +285,10 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
285static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) 285static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
286{ 286{
287 struct task_struct *owner; 287 struct task_struct *owner;
288 bool on_cpu = true; 288 bool on_cpu = false;
289 289
290 if (need_resched()) 290 if (need_resched())
291 return 0; 291 return false;
292 292
293 rcu_read_lock(); 293 rcu_read_lock();
294 owner = ACCESS_ONCE(sem->owner); 294 owner = ACCESS_ONCE(sem->owner);
@@ -297,9 +297,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
297 rcu_read_unlock(); 297 rcu_read_unlock();
298 298
299 /* 299 /*
300 * If sem->owner is not set, the rwsem owner may have 300 * If sem->owner is not set, yet we have just recently entered the
301 * just acquired it and not set the owner yet or the rwsem 301 * slowpath, then there is a possibility reader(s) may have the lock.
302 * has been released. 302 * To be safe, avoid spinning in these situations.
303 */ 303 */
304 return on_cpu; 304 return on_cpu;
305} 305}
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 42f806de49d4..e2d3bc7f03b4 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -12,7 +12,7 @@
12 12
13#include <linux/atomic.h> 13#include <linux/atomic.h>
14 14
15#if defined(CONFIG_SMP) && defined(CONFIG_RWSEM_XCHGADD_ALGORITHM) 15#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
16static inline void rwsem_set_owner(struct rw_semaphore *sem) 16static inline void rwsem_set_owner(struct rw_semaphore *sem)
17{ 17{
18 sem->owner = current; 18 sem->owner = current;
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0ca8d83e2369..4ee194eb524b 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -186,6 +186,7 @@ void thaw_processes(void)
186 186
187 printk("Restarting tasks ... "); 187 printk("Restarting tasks ... ");
188 188
189 __usermodehelper_set_disable_depth(UMH_FREEZING);
189 thaw_workqueues(); 190 thaw_workqueues();
190 191
191 read_lock(&tasklist_lock); 192 read_lock(&tasklist_lock);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4dd8822f732a..ed35a4790afe 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -306,7 +306,7 @@ int suspend_devices_and_enter(suspend_state_t state)
306 error = suspend_ops->begin(state); 306 error = suspend_ops->begin(state);
307 if (error) 307 if (error)
308 goto Close; 308 goto Close;
309 } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) { 309 } else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) {
310 error = freeze_ops->begin(); 310 error = freeze_ops->begin();
311 if (error) 311 if (error)
312 goto Close; 312 goto Close;
@@ -335,7 +335,7 @@ int suspend_devices_and_enter(suspend_state_t state)
335 Close: 335 Close:
336 if (need_suspend_ops(state) && suspend_ops->end) 336 if (need_suspend_ops(state) && suspend_ops->end)
337 suspend_ops->end(); 337 suspend_ops->end();
338 else if (state == PM_SUSPEND_FREEZE && freeze_ops->end) 338 else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
339 freeze_ops->end(); 339 freeze_ops->end();
340 340
341 return error; 341 return error;
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 7fa34f86e5ba..948a7693748e 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -18,7 +18,7 @@
18 * Copyright (C) IBM Corporation, 2005, 2006 18 * Copyright (C) IBM Corporation, 2005, 2006
19 * 19 *
20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 20 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
21 * Josh Triplett <josh@freedesktop.org> 21 * Josh Triplett <josh@joshtriplett.org>
22 * 22 *
23 * See also: Documentation/RCU/torture.txt 23 * See also: Documentation/RCU/torture.txt
24 */ 24 */
@@ -51,7 +51,7 @@
51#include <linux/torture.h> 51#include <linux/torture.h>
52 52
53MODULE_LICENSE("GPL"); 53MODULE_LICENSE("GPL");
54MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>"); 54MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
55 55
56 56
57torture_param(int, fqs_duration, 0, 57torture_param(int, fqs_duration, 0,
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f1ba77363fbb..625d0b0cd75a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -206,6 +206,70 @@ void rcu_bh_qs(int cpu)
206 rdp->passed_quiesce = 1; 206 rdp->passed_quiesce = 1;
207} 207}
208 208
209static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
210
211static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
212 .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
213 .dynticks = ATOMIC_INIT(1),
214#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
215 .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
216 .dynticks_idle = ATOMIC_INIT(1),
217#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
218};
219
220/*
221 * Let the RCU core know that this CPU has gone through the scheduler,
222 * which is a quiescent state. This is called when the need for a
223 * quiescent state is urgent, so we burn an atomic operation and full
224 * memory barriers to let the RCU core know about it, regardless of what
225 * this CPU might (or might not) do in the near future.
226 *
227 * We inform the RCU core by emulating a zero-duration dyntick-idle
228 * period, which we in turn do by incrementing the ->dynticks counter
229 * by two.
230 */
231static void rcu_momentary_dyntick_idle(void)
232{
233 unsigned long flags;
234 struct rcu_data *rdp;
235 struct rcu_dynticks *rdtp;
236 int resched_mask;
237 struct rcu_state *rsp;
238
239 local_irq_save(flags);
240
241 /*
242 * Yes, we can lose flag-setting operations. This is OK, because
243 * the flag will be set again after some delay.
244 */
245 resched_mask = raw_cpu_read(rcu_sched_qs_mask);
246 raw_cpu_write(rcu_sched_qs_mask, 0);
247
248 /* Find the flavor that needs a quiescent state. */
249 for_each_rcu_flavor(rsp) {
250 rdp = raw_cpu_ptr(rsp->rda);
251 if (!(resched_mask & rsp->flavor_mask))
252 continue;
253 smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
254 if (ACCESS_ONCE(rdp->mynode->completed) !=
255 ACCESS_ONCE(rdp->cond_resched_completed))
256 continue;
257
258 /*
259 * Pretend to be momentarily idle for the quiescent state.
260 * This allows the grace-period kthread to record the
261 * quiescent state, with no need for this CPU to do anything
262 * further.
263 */
264 rdtp = this_cpu_ptr(&rcu_dynticks);
265 smp_mb__before_atomic(); /* Earlier stuff before QS. */
266 atomic_add(2, &rdtp->dynticks); /* QS. */
267 smp_mb__after_atomic(); /* Later stuff after QS. */
268 break;
269 }
270 local_irq_restore(flags);
271}
272
209/* 273/*
210 * Note a context switch. This is a quiescent state for RCU-sched, 274 * Note a context switch. This is a quiescent state for RCU-sched,
211 * and requires special handling for preemptible RCU. 275 * and requires special handling for preemptible RCU.
@@ -216,19 +280,12 @@ void rcu_note_context_switch(int cpu)
216 trace_rcu_utilization(TPS("Start context switch")); 280 trace_rcu_utilization(TPS("Start context switch"));
217 rcu_sched_qs(cpu); 281 rcu_sched_qs(cpu);
218 rcu_preempt_note_context_switch(cpu); 282 rcu_preempt_note_context_switch(cpu);
283 if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
284 rcu_momentary_dyntick_idle();
219 trace_rcu_utilization(TPS("End context switch")); 285 trace_rcu_utilization(TPS("End context switch"));
220} 286}
221EXPORT_SYMBOL_GPL(rcu_note_context_switch); 287EXPORT_SYMBOL_GPL(rcu_note_context_switch);
222 288
223static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
224 .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
225 .dynticks = ATOMIC_INIT(1),
226#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
227 .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
228 .dynticks_idle = ATOMIC_INIT(1),
229#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
230};
231
232static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ 289static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
233static long qhimark = 10000; /* If this many pending, ignore blimit. */ 290static long qhimark = 10000; /* If this many pending, ignore blimit. */
234static long qlowmark = 100; /* Once only this many pending, use blimit. */ 291static long qlowmark = 100; /* Once only this many pending, use blimit. */
@@ -243,6 +300,13 @@ static ulong jiffies_till_next_fqs = ULONG_MAX;
243module_param(jiffies_till_first_fqs, ulong, 0644); 300module_param(jiffies_till_first_fqs, ulong, 0644);
244module_param(jiffies_till_next_fqs, ulong, 0644); 301module_param(jiffies_till_next_fqs, ulong, 0644);
245 302
303/*
304 * How long the grace period must be before we start recruiting
305 * quiescent-state help from rcu_note_context_switch().
306 */
307static ulong jiffies_till_sched_qs = HZ / 20;
308module_param(jiffies_till_sched_qs, ulong, 0644);
309
246static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, 310static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
247 struct rcu_data *rdp); 311 struct rcu_data *rdp);
248static void force_qs_rnp(struct rcu_state *rsp, 312static void force_qs_rnp(struct rcu_state *rsp,
@@ -853,6 +917,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
853 bool *isidle, unsigned long *maxj) 917 bool *isidle, unsigned long *maxj)
854{ 918{
855 unsigned int curr; 919 unsigned int curr;
920 int *rcrmp;
856 unsigned int snap; 921 unsigned int snap;
857 922
858 curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks); 923 curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
@@ -893,27 +958,43 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
893 } 958 }
894 959
895 /* 960 /*
896 * There is a possibility that a CPU in adaptive-ticks state 961 * A CPU running for an extended time within the kernel can
897 * might run in the kernel with the scheduling-clock tick disabled 962 * delay RCU grace periods. When the CPU is in NO_HZ_FULL mode,
898 * for an extended time period. Invoke rcu_kick_nohz_cpu() to 963 * even context-switching back and forth between a pair of
899 * force the CPU to restart the scheduling-clock tick in this 964 * in-kernel CPU-bound tasks cannot advance grace periods.
900 * CPU is in this state. 965 * So if the grace period is old enough, make the CPU pay attention.
901 */ 966 * Note that the unsynchronized assignments to the per-CPU
902 rcu_kick_nohz_cpu(rdp->cpu); 967 * rcu_sched_qs_mask variable are safe. Yes, setting of
903 968 * bits can be lost, but they will be set again on the next
904 /* 969 * force-quiescent-state pass. So lost bit sets do not result
905 * Alternatively, the CPU might be running in the kernel 970 * in incorrect behavior, merely in a grace period lasting
906 * for an extended period of time without a quiescent state. 971 * a few jiffies longer than it might otherwise. Because
907 * Attempt to force the CPU through the scheduler to gain the 972 * there are at most four threads involved, and because the
908 * needed quiescent state, but only if the grace period has gone 973 * updates are only once every few jiffies, the probability of
909 * on for an uncommonly long time. If there are many stuck CPUs, 974 * lossage (and thus of slight grace-period extension) is
910 * we will beat on the first one until it gets unstuck, then move 975 * quite low.
911 * to the next. Only do this for the primary flavor of RCU. 976 *
977 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
978 * is set too high, we override with half of the RCU CPU stall
979 * warning delay.
912 */ 980 */
913 if (rdp->rsp == rcu_state_p && 981 rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
982 if (ULONG_CMP_GE(jiffies,
983 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
914 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) { 984 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
915 rdp->rsp->jiffies_resched += 5; 985 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
916 resched_cpu(rdp->cpu); 986 ACCESS_ONCE(rdp->cond_resched_completed) =
987 ACCESS_ONCE(rdp->mynode->completed);
988 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
989 ACCESS_ONCE(*rcrmp) =
990 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
991 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
992 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
993 } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
994 /* Time to beat on that CPU again! */
995 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
996 rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
997 }
917 } 998 }
918 999
919 return 0; 1000 return 0;
@@ -3491,6 +3572,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
3491 "rcu_node_fqs_1", 3572 "rcu_node_fqs_1",
3492 "rcu_node_fqs_2", 3573 "rcu_node_fqs_2",
3493 "rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */ 3574 "rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */
3575 static u8 fl_mask = 0x1;
3494 int cpustride = 1; 3576 int cpustride = 1;
3495 int i; 3577 int i;
3496 int j; 3578 int j;
@@ -3509,6 +3591,8 @@ static void __init rcu_init_one(struct rcu_state *rsp,
3509 for (i = 1; i < rcu_num_lvls; i++) 3591 for (i = 1; i < rcu_num_lvls; i++)
3510 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1]; 3592 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
3511 rcu_init_levelspread(rsp); 3593 rcu_init_levelspread(rsp);
3594 rsp->flavor_mask = fl_mask;
3595 fl_mask <<= 1;
3512 3596
3513 /* Initialize the elements themselves, starting from the leaves. */ 3597 /* Initialize the elements themselves, starting from the leaves. */
3514 3598
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bf2c1e669691..0f69a79c5b7d 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -307,6 +307,9 @@ struct rcu_data {
307 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ 307 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
308 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ 308 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
309 unsigned long offline_fqs; /* Kicked due to being offline. */ 309 unsigned long offline_fqs; /* Kicked due to being offline. */
310 unsigned long cond_resched_completed;
311 /* Grace period that needs help */
312 /* from cond_resched(). */
310 313
311 /* 5) __rcu_pending() statistics. */ 314 /* 5) __rcu_pending() statistics. */
312 unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */ 315 unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */
@@ -392,6 +395,7 @@ struct rcu_state {
392 struct rcu_node *level[RCU_NUM_LVLS]; /* Hierarchy levels. */ 395 struct rcu_node *level[RCU_NUM_LVLS]; /* Hierarchy levels. */
393 u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ 396 u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
394 u8 levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ 397 u8 levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
398 u8 flavor_mask; /* bit in flavor mask. */
395 struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */ 399 struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */
396 void (*call)(struct rcu_head *head, /* call_rcu() flavor. */ 400 void (*call)(struct rcu_head *head, /* call_rcu() flavor. */
397 void (*func)(struct rcu_head *head)); 401 void (*func)(struct rcu_head *head));
@@ -563,7 +567,7 @@ static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
563static void do_nocb_deferred_wakeup(struct rcu_data *rdp); 567static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
564static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp); 568static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
565static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp); 569static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
566static void rcu_kick_nohz_cpu(int cpu); 570static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
567static bool init_nocb_callback_list(struct rcu_data *rdp); 571static bool init_nocb_callback_list(struct rcu_data *rdp);
568static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq); 572static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
569static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq); 573static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index cbc2c45265e2..02ac0fb186b8 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2404,7 +2404,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
2404 * if an adaptive-ticks CPU is failing to respond to the current grace 2404 * if an adaptive-ticks CPU is failing to respond to the current grace
2405 * period and has not be idle from an RCU perspective, kick it. 2405 * period and has not be idle from an RCU perspective, kick it.
2406 */ 2406 */
2407static void rcu_kick_nohz_cpu(int cpu) 2407static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
2408{ 2408{
2409#ifdef CONFIG_NO_HZ_FULL 2409#ifdef CONFIG_NO_HZ_FULL
2410 if (tick_nohz_full_cpu(cpu)) 2410 if (tick_nohz_full_cpu(cpu))
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index a2aeb4df0f60..bc7883570530 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -200,12 +200,12 @@ void wait_rcu_gp(call_rcu_func_t crf)
200EXPORT_SYMBOL_GPL(wait_rcu_gp); 200EXPORT_SYMBOL_GPL(wait_rcu_gp);
201 201
202#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 202#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
203static inline void debug_init_rcu_head(struct rcu_head *head) 203void init_rcu_head(struct rcu_head *head)
204{ 204{
205 debug_object_init(head, &rcuhead_debug_descr); 205 debug_object_init(head, &rcuhead_debug_descr);
206} 206}
207 207
208static inline void debug_rcu_head_free(struct rcu_head *head) 208void destroy_rcu_head(struct rcu_head *head)
209{ 209{
210 debug_object_free(head, &rcuhead_debug_descr); 210 debug_object_free(head, &rcuhead_debug_descr);
211} 211}
@@ -350,21 +350,3 @@ static int __init check_cpu_stall_init(void)
350early_initcall(check_cpu_stall_init); 350early_initcall(check_cpu_stall_init);
351 351
352#endif /* #ifdef CONFIG_RCU_STALL_COMMON */ 352#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
353
354/*
355 * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
356 */
357
358DEFINE_PER_CPU(int, rcu_cond_resched_count);
359
360/*
361 * Report a set of RCU quiescent states, for use by cond_resched()
362 * and friends. Out of line due to being called infrequently.
363 */
364void rcu_resched(void)
365{
366 preempt_disable();
367 __this_cpu_write(rcu_cond_resched_count, 0);
368 rcu_note_context_switch(smp_processor_id());
369 preempt_enable();
370}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3bdf01b494fe..bc1638b33449 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4147,7 +4147,6 @@ static void __cond_resched(void)
4147 4147
4148int __sched _cond_resched(void) 4148int __sched _cond_resched(void)
4149{ 4149{
4150 rcu_cond_resched();
4151 if (should_resched()) { 4150 if (should_resched()) {
4152 __cond_resched(); 4151 __cond_resched();
4153 return 1; 4152 return 1;
@@ -4166,18 +4165,15 @@ EXPORT_SYMBOL(_cond_resched);
4166 */ 4165 */
4167int __cond_resched_lock(spinlock_t *lock) 4166int __cond_resched_lock(spinlock_t *lock)
4168{ 4167{
4169 bool need_rcu_resched = rcu_should_resched();
4170 int resched = should_resched(); 4168 int resched = should_resched();
4171 int ret = 0; 4169 int ret = 0;
4172 4170
4173 lockdep_assert_held(lock); 4171 lockdep_assert_held(lock);
4174 4172
4175 if (spin_needbreak(lock) || resched || need_rcu_resched) { 4173 if (spin_needbreak(lock) || resched) {
4176 spin_unlock(lock); 4174 spin_unlock(lock);
4177 if (resched) 4175 if (resched)
4178 __cond_resched(); 4176 __cond_resched();
4179 else if (unlikely(need_rcu_resched))
4180 rcu_resched();
4181 else 4177 else
4182 cpu_relax(); 4178 cpu_relax();
4183 ret = 1; 4179 ret = 1;
@@ -4191,7 +4187,6 @@ int __sched __cond_resched_softirq(void)
4191{ 4187{
4192 BUG_ON(!in_softirq()); 4188 BUG_ON(!in_softirq());
4193 4189
4194 rcu_cond_resched(); /* BH disabled OK, just recording QSes. */
4195 if (should_resched()) { 4190 if (should_resched()) {
4196 local_bh_enable(); 4191 local_bh_enable();
4197 __cond_resched(); 4192 __cond_resched();
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 695f9773bb60..627b3c34b821 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -608,7 +608,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
608 608
609 avg_atom = p->se.sum_exec_runtime; 609 avg_atom = p->se.sum_exec_runtime;
610 if (nr_switches) 610 if (nr_switches)
611 do_div(avg_atom, nr_switches); 611 avg_atom = div64_ul(avg_atom, nr_switches);
612 else 612 else
613 avg_atom = -1LL; 613 avg_atom = -1LL;
614 614
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 88c9c65a430d..fe75444ae7ec 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -585,9 +585,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
585 struct itimerspec *new_setting, 585 struct itimerspec *new_setting,
586 struct itimerspec *old_setting) 586 struct itimerspec *old_setting)
587{ 587{
588 ktime_t exp;
589
588 if (!rtcdev) 590 if (!rtcdev)
589 return -ENOTSUPP; 591 return -ENOTSUPP;
590 592
593 if (flags & ~TIMER_ABSTIME)
594 return -EINVAL;
595
591 if (old_setting) 596 if (old_setting)
592 alarm_timer_get(timr, old_setting); 597 alarm_timer_get(timr, old_setting);
593 598
@@ -597,8 +602,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
597 602
598 /* start the timer */ 603 /* start the timer */
599 timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval); 604 timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
600 alarm_start(&timr->it.alarm.alarmtimer, 605 exp = timespec_to_ktime(new_setting->it_value);
601 timespec_to_ktime(new_setting->it_value)); 606 /* Convert (if necessary) to absolute time */
607 if (flags != TIMER_ABSTIME) {
608 ktime_t now;
609
610 now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
611 exp = ktime_add(now, exp);
612 }
613
614 alarm_start(&timr->it.alarm.alarmtimer, exp);
602 return 0; 615 return 0;
603} 616}
604 617
@@ -730,6 +743,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
730 if (!alarmtimer_get_rtcdev()) 743 if (!alarmtimer_get_rtcdev())
731 return -ENOTSUPP; 744 return -ENOTSUPP;
732 745
746 if (flags & ~TIMER_ABSTIME)
747 return -EINVAL;
748
733 if (!capable(CAP_WAKE_ALARM)) 749 if (!capable(CAP_WAKE_ALARM))
734 return -EPERM; 750 return -EPERM;
735 751
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index ad362c260ef4..9c94c19f1305 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -146,7 +146,8 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
146{ 146{
147 /* Nothing to do if we already reached the limit */ 147 /* Nothing to do if we already reached the limit */
148 if (dev->min_delta_ns >= MIN_DELTA_LIMIT) { 148 if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
149 printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n"); 149 printk_deferred(KERN_WARNING
150 "CE: Reprogramming failure. Giving up\n");
150 dev->next_event.tv64 = KTIME_MAX; 151 dev->next_event.tv64 = KTIME_MAX;
151 return -ETIME; 152 return -ETIME;
152 } 153 }
@@ -159,9 +160,10 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
159 if (dev->min_delta_ns > MIN_DELTA_LIMIT) 160 if (dev->min_delta_ns > MIN_DELTA_LIMIT)
160 dev->min_delta_ns = MIN_DELTA_LIMIT; 161 dev->min_delta_ns = MIN_DELTA_LIMIT;
161 162
162 printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n", 163 printk_deferred(KERN_WARNING
163 dev->name ? dev->name : "?", 164 "CE: %s increased min_delta_ns to %llu nsec\n",
164 (unsigned long long) dev->min_delta_ns); 165 dev->name ? dev->name : "?",
166 (unsigned long long) dev->min_delta_ns);
165 return 0; 167 return 0;
166} 168}
167 169
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 445106d2c729..01d2d15aa662 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -191,7 +191,8 @@ void __init sched_clock_postinit(void)
191 191
192static int sched_clock_suspend(void) 192static int sched_clock_suspend(void)
193{ 193{
194 sched_clock_poll(&sched_clock_timer); 194 update_sched_clock();
195 hrtimer_cancel(&sched_clock_timer);
195 cd.suspended = true; 196 cd.suspended = true;
196 return 0; 197 return 0;
197} 198}
@@ -199,6 +200,7 @@ static int sched_clock_suspend(void)
199static void sched_clock_resume(void) 200static void sched_clock_resume(void)
200{ 201{
201 cd.epoch_cyc = read_sched_clock(); 202 cd.epoch_cyc = read_sched_clock();
203 hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
202 cd.suspended = false; 204 cd.suspended = false;
203} 205}
204 206
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5b372e3ed675..ac9d1dad630b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -265,12 +265,12 @@ static void update_ftrace_function(void)
265 func = ftrace_ops_list_func; 265 func = ftrace_ops_list_func;
266 } 266 }
267 267
268 update_function_graph_func();
269
268 /* If there's no change, then do nothing more here */ 270 /* If there's no change, then do nothing more here */
269 if (ftrace_trace_function == func) 271 if (ftrace_trace_function == func)
270 return; 272 return;
271 273
272 update_function_graph_func();
273
274 /* 274 /*
275 * If we are using the list function, it doesn't care 275 * If we are using the list function, it doesn't care
276 * about the function_trace_ops. 276 * about the function_trace_ops.
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7c56c3d06943..ff7027199a9a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
616 struct ring_buffer_per_cpu *cpu_buffer; 616 struct ring_buffer_per_cpu *cpu_buffer;
617 struct rb_irq_work *work; 617 struct rb_irq_work *work;
618 618
619 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
620 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
621 return POLLIN | POLLRDNORM;
622
623 if (cpu == RING_BUFFER_ALL_CPUS) 619 if (cpu == RING_BUFFER_ALL_CPUS)
624 work = &buffer->irq_work; 620 work = &buffer->irq_work;
625 else { 621 else {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f243444a3772..291397e66669 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -466,6 +466,12 @@ int __trace_puts(unsigned long ip, const char *str, int size)
466 struct print_entry *entry; 466 struct print_entry *entry;
467 unsigned long irq_flags; 467 unsigned long irq_flags;
468 int alloc; 468 int alloc;
469 int pc;
470
471 if (!(trace_flags & TRACE_ITER_PRINTK))
472 return 0;
473
474 pc = preempt_count();
469 475
470 if (unlikely(tracing_selftest_running || tracing_disabled)) 476 if (unlikely(tracing_selftest_running || tracing_disabled))
471 return 0; 477 return 0;
@@ -475,7 +481,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
475 local_save_flags(irq_flags); 481 local_save_flags(irq_flags);
476 buffer = global_trace.trace_buffer.buffer; 482 buffer = global_trace.trace_buffer.buffer;
477 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 483 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
478 irq_flags, preempt_count()); 484 irq_flags, pc);
479 if (!event) 485 if (!event)
480 return 0; 486 return 0;
481 487
@@ -492,6 +498,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
492 entry->buf[size] = '\0'; 498 entry->buf[size] = '\0';
493 499
494 __buffer_unlock_commit(buffer, event); 500 __buffer_unlock_commit(buffer, event);
501 ftrace_trace_stack(buffer, irq_flags, 4, pc);
495 502
496 return size; 503 return size;
497} 504}
@@ -509,6 +516,12 @@ int __trace_bputs(unsigned long ip, const char *str)
509 struct bputs_entry *entry; 516 struct bputs_entry *entry;
510 unsigned long irq_flags; 517 unsigned long irq_flags;
511 int size = sizeof(struct bputs_entry); 518 int size = sizeof(struct bputs_entry);
519 int pc;
520
521 if (!(trace_flags & TRACE_ITER_PRINTK))
522 return 0;
523
524 pc = preempt_count();
512 525
513 if (unlikely(tracing_selftest_running || tracing_disabled)) 526 if (unlikely(tracing_selftest_running || tracing_disabled))
514 return 0; 527 return 0;
@@ -516,7 +529,7 @@ int __trace_bputs(unsigned long ip, const char *str)
516 local_save_flags(irq_flags); 529 local_save_flags(irq_flags);
517 buffer = global_trace.trace_buffer.buffer; 530 buffer = global_trace.trace_buffer.buffer;
518 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, 531 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
519 irq_flags, preempt_count()); 532 irq_flags, pc);
520 if (!event) 533 if (!event)
521 return 0; 534 return 0;
522 535
@@ -525,6 +538,7 @@ int __trace_bputs(unsigned long ip, const char *str)
525 entry->str = str; 538 entry->str = str;
526 539
527 __buffer_unlock_commit(buffer, event); 540 __buffer_unlock_commit(buffer, event);
541 ftrace_trace_stack(buffer, irq_flags, 4, pc);
528 542
529 return 1; 543 return 1;
530} 544}
@@ -809,7 +823,7 @@ static struct {
809 { trace_clock_local, "local", 1 }, 823 { trace_clock_local, "local", 1 },
810 { trace_clock_global, "global", 1 }, 824 { trace_clock_global, "global", 1 },
811 { trace_clock_counter, "counter", 0 }, 825 { trace_clock_counter, "counter", 0 },
812 { trace_clock_jiffies, "uptime", 1 }, 826 { trace_clock_jiffies, "uptime", 0 },
813 { trace_clock, "perf", 1 }, 827 { trace_clock, "perf", 1 },
814 ARCH_TRACE_CLOCKS 828 ARCH_TRACE_CLOCKS
815}; 829};
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 26dc348332b7..57b67b1f24d1 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
59 59
60/* 60/*
61 * trace_jiffy_clock(): Simply use jiffies as a clock counter. 61 * trace_jiffy_clock(): Simply use jiffies as a clock counter.
62 * Note that this use of jiffies_64 is not completely safe on
63 * 32-bit systems. But the window is tiny, and the effect if
64 * we are affected is that we will have an obviously bogus
65 * timestamp on a trace event - i.e. not life threatening.
62 */ 66 */
63u64 notrace trace_clock_jiffies(void) 67u64 notrace trace_clock_jiffies(void)
64{ 68{
65 u64 jiffy = jiffies - INITIAL_JIFFIES; 69 return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
66
67 /* Return nsecs */
68 return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
69} 70}
70 71
71/* 72/*
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f99e0b3bca8c..2de53628689f 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -470,6 +470,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
470 470
471 list_del(&file->list); 471 list_del(&file->list);
472 remove_subsystem(file->system); 472 remove_subsystem(file->system);
473 free_event_filter(file->filter);
473 kmem_cache_free(file_cachep, file); 474 kmem_cache_free(file_cachep, file);
474} 475}
475 476
diff --git a/lib/cpumask.c b/lib/cpumask.c
index c101230658eb..b6513a9f2892 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -191,7 +191,7 @@ int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
191 191
192 i %= num_online_cpus(); 192 i %= num_online_cpus();
193 193
194 if (!cpumask_of_node(numa_node)) { 194 if (numa_node == -1 || !cpumask_of_node(numa_node)) {
195 /* Use all online cpu's for non numa aware system */ 195 /* Use all online cpu's for non numa aware system */
196 cpumask_copy(mask, cpu_online_mask); 196 cpumask_copy(mask, cpu_online_mask);
197 } else { 197 } else {
diff --git a/mm/filemap.c b/mm/filemap.c
index dafb06f70a09..900edfaf6df5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1031,18 +1031,21 @@ EXPORT_SYMBOL(find_lock_entry);
1031 * @mapping: the address_space to search 1031 * @mapping: the address_space to search
1032 * @offset: the page index 1032 * @offset: the page index
1033 * @fgp_flags: PCG flags 1033 * @fgp_flags: PCG flags
1034 * @gfp_mask: gfp mask to use if a page is to be allocated 1034 * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
1035 * @radix_gfp_mask: gfp mask to use for radix tree node allocation
1035 * 1036 *
1036 * Looks up the page cache slot at @mapping & @offset. 1037 * Looks up the page cache slot at @mapping & @offset.
1037 * 1038 *
1038 * PCG flags modify how the page is returned 1039 * PCG flags modify how the page is returned.
1039 * 1040 *
1040 * FGP_ACCESSED: the page will be marked accessed 1041 * FGP_ACCESSED: the page will be marked accessed
1041 * FGP_LOCK: Page is return locked 1042 * FGP_LOCK: Page is return locked
1042 * FGP_CREAT: If page is not present then a new page is allocated using 1043 * FGP_CREAT: If page is not present then a new page is allocated using
1043 * @gfp_mask and added to the page cache and the VM's LRU 1044 * @cache_gfp_mask and added to the page cache and the VM's LRU
1044 * list. The page is returned locked and with an increased 1045 * list. If radix tree nodes are allocated during page cache
1045 * refcount. Otherwise, %NULL is returned. 1046 * insertion then @radix_gfp_mask is used. The page is returned
1047 * locked and with an increased refcount. Otherwise, %NULL is
1048 * returned.
1046 * 1049 *
1047 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even 1050 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
1048 * if the GFP flags specified for FGP_CREAT are atomic. 1051 * if the GFP flags specified for FGP_CREAT are atomic.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2024bbd573d2..7a0a73d2fcff 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -856,7 +856,7 @@ struct hstate *size_to_hstate(unsigned long size)
856 return NULL; 856 return NULL;
857} 857}
858 858
859static void free_huge_page(struct page *page) 859void free_huge_page(struct page *page)
860{ 860{
861 /* 861 /*
862 * Can't pass hstate in here because it is called from the 862 * Can't pass hstate in here because it is called from the
@@ -2604,6 +2604,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2604 } else { 2604 } else {
2605 if (cow) 2605 if (cow)
2606 huge_ptep_set_wrprotect(src, addr, src_pte); 2606 huge_ptep_set_wrprotect(src, addr, src_pte);
2607 entry = huge_ptep_get(src_pte);
2607 ptepage = pte_page(entry); 2608 ptepage = pte_page(entry);
2608 get_page(ptepage); 2609 get_page(ptepage);
2609 page_dup_rmap(ptepage); 2610 page_dup_rmap(ptepage);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a2c7bcb0e6eb..1f14a430c656 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5415,8 +5415,12 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
5415{ 5415{
5416 struct mem_cgroup_eventfd_list *ev; 5416 struct mem_cgroup_eventfd_list *ev;
5417 5417
5418 spin_lock(&memcg_oom_lock);
5419
5418 list_for_each_entry(ev, &memcg->oom_notify, list) 5420 list_for_each_entry(ev, &memcg->oom_notify, list)
5419 eventfd_signal(ev->eventfd, 1); 5421 eventfd_signal(ev->eventfd, 1);
5422
5423 spin_unlock(&memcg_oom_lock);
5420 return 0; 5424 return 0;
5421} 5425}
5422 5426
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c6399e328931..a013bc94ebbe 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
435 if (av == NULL) /* Not actually mapped anymore */ 435 if (av == NULL) /* Not actually mapped anymore */
436 return; 436 return;
437 437
438 pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 438 pgoff = page_to_pgoff(page);
439 read_lock(&tasklist_lock); 439 read_lock(&tasklist_lock);
440 for_each_process (tsk) { 440 for_each_process (tsk) {
441 struct anon_vma_chain *vmac; 441 struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
469 mutex_lock(&mapping->i_mmap_mutex); 469 mutex_lock(&mapping->i_mmap_mutex);
470 read_lock(&tasklist_lock); 470 read_lock(&tasklist_lock);
471 for_each_process(tsk) { 471 for_each_process(tsk) {
472 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 472 pgoff_t pgoff = page_to_pgoff(page);
473 struct task_struct *t = task_early_kill(tsk, force_early); 473 struct task_struct *t = task_early_kill(tsk, force_early);
474 474
475 if (!t) 475 if (!t)
@@ -895,7 +895,13 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
895 struct page *hpage = *hpagep; 895 struct page *hpage = *hpagep;
896 struct page *ppage; 896 struct page *ppage;
897 897
898 if (PageReserved(p) || PageSlab(p) || !PageLRU(p)) 898 /*
899 * Here we are interested only in user-mapped pages, so skip any
900 * other types of pages.
901 */
902 if (PageReserved(p) || PageSlab(p))
903 return SWAP_SUCCESS;
904 if (!(PageLRU(hpage) || PageHuge(p)))
899 return SWAP_SUCCESS; 905 return SWAP_SUCCESS;
900 906
901 /* 907 /*
@@ -905,8 +911,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
905 if (!page_mapped(hpage)) 911 if (!page_mapped(hpage))
906 return SWAP_SUCCESS; 912 return SWAP_SUCCESS;
907 913
908 if (PageKsm(p)) 914 if (PageKsm(p)) {
915 pr_err("MCE %#lx: can't handle KSM pages.\n", pfn);
909 return SWAP_FAIL; 916 return SWAP_FAIL;
917 }
910 918
911 if (PageSwapCache(p)) { 919 if (PageSwapCache(p)) {
912 printk(KERN_ERR 920 printk(KERN_ERR
@@ -1229,7 +1237,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1229 */ 1237 */
1230 if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage) 1238 if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
1231 != SWAP_SUCCESS) { 1239 != SWAP_SUCCESS) {
1232 printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn); 1240 action_result(pfn, "unmapping failed", IGNORED);
1233 res = -EBUSY; 1241 res = -EBUSY;
1234 goto out; 1242 goto out;
1235 } 1243 }
diff --git a/mm/memory.c b/mm/memory.c
index d67fd9fcf1f2..8b44f765b645 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2758,23 +2758,18 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
2758 update_mmu_cache(vma, address, pte); 2758 update_mmu_cache(vma, address, pte);
2759} 2759}
2760 2760
2761static unsigned long fault_around_bytes = 65536; 2761static unsigned long fault_around_bytes = rounddown_pow_of_two(65536);
2762 2762
2763/*
2764 * fault_around_pages() and fault_around_mask() round down fault_around_bytes
2765 * to nearest page order. It's what do_fault_around() expects to see.
2766 */
2767static inline unsigned long fault_around_pages(void) 2763static inline unsigned long fault_around_pages(void)
2768{ 2764{
2769 return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE; 2765 return fault_around_bytes >> PAGE_SHIFT;
2770} 2766}
2771 2767
2772static inline unsigned long fault_around_mask(void) 2768static inline unsigned long fault_around_mask(void)
2773{ 2769{
2774 return ~(rounddown_pow_of_two(fault_around_bytes) - 1) & PAGE_MASK; 2770 return ~(fault_around_bytes - 1) & PAGE_MASK;
2775} 2771}
2776 2772
2777
2778#ifdef CONFIG_DEBUG_FS 2773#ifdef CONFIG_DEBUG_FS
2779static int fault_around_bytes_get(void *data, u64 *val) 2774static int fault_around_bytes_get(void *data, u64 *val)
2780{ 2775{
@@ -2782,11 +2777,19 @@ static int fault_around_bytes_get(void *data, u64 *val)
2782 return 0; 2777 return 0;
2783} 2778}
2784 2779
2780/*
2781 * fault_around_pages() and fault_around_mask() expects fault_around_bytes
2782 * rounded down to nearest page order. It's what do_fault_around() expects to
2783 * see.
2784 */
2785static int fault_around_bytes_set(void *data, u64 val) 2785static int fault_around_bytes_set(void *data, u64 val)
2786{ 2786{
2787 if (val / PAGE_SIZE > PTRS_PER_PTE) 2787 if (val / PAGE_SIZE > PTRS_PER_PTE)
2788 return -EINVAL; 2788 return -EINVAL;
2789 fault_around_bytes = val; 2789 if (val > PAGE_SIZE)
2790 fault_around_bytes = rounddown_pow_of_two(val);
2791 else
2792 fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
2790 return 0; 2793 return 0;
2791} 2794}
2792DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops, 2795DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
@@ -2882,7 +2885,8 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2882 * if page by the offset is not ready to be mapped (cold cache or 2885 * if page by the offset is not ready to be mapped (cold cache or
2883 * something). 2886 * something).
2884 */ 2887 */
2885 if (vma->vm_ops->map_pages && fault_around_pages() > 1) { 2888 if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
2889 fault_around_pages() > 1) {
2886 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 2890 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2887 do_fault_around(vma, address, pte, pgoff, flags); 2891 do_fault_around(vma, address, pte, pgoff, flags);
2888 if (!pte_same(*pte, orig_pte)) 2892 if (!pte_same(*pte, orig_pte))
diff --git a/mm/migrate.c b/mm/migrate.c
index 9e0beaa91845..be6dbf995c0c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -988,9 +988,10 @@ out:
988 * it. Otherwise, putback_lru_page() will drop the reference grabbed 988 * it. Otherwise, putback_lru_page() will drop the reference grabbed
989 * during isolation. 989 * during isolation.
990 */ 990 */
991 if (rc != MIGRATEPAGE_SUCCESS && put_new_page) 991 if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
992 ClearPageSwapBacked(newpage);
992 put_new_page(newpage, private); 993 put_new_page(newpage, private);
993 else 994 } else
994 putback_lru_page(newpage); 995 putback_lru_page(newpage);
995 996
996 if (result) { 997 if (result) {
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 518e2c3f4c75..e0c943014eb7 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1306,9 +1306,9 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
1306 *bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); 1306 *bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
1307 1307
1308 if (bdi_bg_thresh) 1308 if (bdi_bg_thresh)
1309 *bdi_bg_thresh = div_u64((u64)*bdi_thresh * 1309 *bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
1310 background_thresh, 1310 background_thresh,
1311 dirty_thresh); 1311 dirty_thresh) : 0;
1312 1312
1313 /* 1313 /*
1314 * In order to avoid the stacked BDI deadlock we need 1314 * In order to avoid the stacked BDI deadlock we need
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ea758b898fd..ef44ad736ca1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2447,7 +2447,7 @@ static inline int
2447gfp_to_alloc_flags(gfp_t gfp_mask) 2447gfp_to_alloc_flags(gfp_t gfp_mask)
2448{ 2448{
2449 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 2449 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2450 const gfp_t wait = gfp_mask & __GFP_WAIT; 2450 const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
2451 2451
2452 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */ 2452 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2453 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 2453 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -2456,20 +2456,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
2456 * The caller may dip into page reserves a bit more if the caller 2456 * The caller may dip into page reserves a bit more if the caller
2457 * cannot run direct reclaim, or if the caller has realtime scheduling 2457 * cannot run direct reclaim, or if the caller has realtime scheduling
2458 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 2458 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
2459 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 2459 * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
2460 */ 2460 */
2461 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); 2461 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2462 2462
2463 if (!wait) { 2463 if (atomic) {
2464 /* 2464 /*
2465 * Not worth trying to allocate harder for 2465 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
2466 * __GFP_NOMEMALLOC even if it can't schedule. 2466 * if it can't schedule.
2467 */ 2467 */
2468 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2468 if (!(gfp_mask & __GFP_NOMEMALLOC))
2469 alloc_flags |= ALLOC_HARDER; 2469 alloc_flags |= ALLOC_HARDER;
2470 /* 2470 /*
2471 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 2471 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
2472 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 2472 * comment for __cpuset_node_allowed_softwall().
2473 */ 2473 */
2474 alloc_flags &= ~ALLOC_CPUSET; 2474 alloc_flags &= ~ALLOC_CPUSET;
2475 } else if (unlikely(rt_task(current)) && !in_interrupt()) 2475 } else if (unlikely(rt_task(current)) && !in_interrupt())
@@ -6062,11 +6062,13 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
6062} 6062}
6063 6063
6064/** 6064/**
6065 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages 6065 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
6066 * @page: The page within the block of interest 6066 * @page: The page within the block of interest
6067 * @start_bitidx: The first bit of interest to retrieve 6067 * @pfn: The target page frame number
6068 * @end_bitidx: The last bit of interest 6068 * @end_bitidx: The last bit of interest to retrieve
6069 * returns pageblock_bits flags 6069 * @mask: mask of bits that the caller is interested in
6070 *
6071 * Return: pageblock_bits flags
6070 */ 6072 */
6071unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, 6073unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
6072 unsigned long end_bitidx, 6074 unsigned long end_bitidx,
@@ -6091,9 +6093,10 @@ unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
6091/** 6093/**
6092 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages 6094 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
6093 * @page: The page within the block of interest 6095 * @page: The page within the block of interest
6094 * @start_bitidx: The first bit of interest
6095 * @end_bitidx: The last bit of interest
6096 * @flags: The flags to set 6096 * @flags: The flags to set
6097 * @pfn: The target page frame number
6098 * @end_bitidx: The last bit of interest
6099 * @mask: mask of bits that the caller is interested in
6097 */ 6100 */
6098void set_pfnblock_flags_mask(struct page *page, unsigned long flags, 6101void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
6099 unsigned long pfn, 6102 unsigned long pfn,
diff --git a/mm/rmap.c b/mm/rmap.c
index b7e94ebbd09e..22a4a7699cdb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
517static inline unsigned long 517static inline unsigned long
518__vma_address(struct page *page, struct vm_area_struct *vma) 518__vma_address(struct page *page, struct vm_area_struct *vma)
519{ 519{
520 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 520 pgoff_t pgoff = page_to_pgoff(page);
521
522 if (unlikely(is_vm_hugetlb_page(vma)))
523 pgoff = page->index << huge_page_order(page_hstate(page));
524
525 return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 521 return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
526} 522}
527 523
@@ -1639,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1639static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) 1635static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
1640{ 1636{
1641 struct anon_vma *anon_vma; 1637 struct anon_vma *anon_vma;
1642 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1638 pgoff_t pgoff = page_to_pgoff(page);
1643 struct anon_vma_chain *avc; 1639 struct anon_vma_chain *avc;
1644 int ret = SWAP_AGAIN; 1640 int ret = SWAP_AGAIN;
1645 1641
@@ -1680,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
1680static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) 1676static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
1681{ 1677{
1682 struct address_space *mapping = page->mapping; 1678 struct address_space *mapping = page->mapping;
1683 pgoff_t pgoff = page->index << compound_order(page); 1679 pgoff_t pgoff = page_to_pgoff(page);
1684 struct vm_area_struct *vma; 1680 struct vm_area_struct *vma;
1685 int ret = SWAP_AGAIN; 1681 int ret = SWAP_AGAIN;
1686 1682
diff --git a/mm/shmem.c b/mm/shmem.c
index 1140f49b6ded..af68b15a8fc1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
85 * a time): we would prefer not to enlarge the shmem inode just for that. 85 * a time): we would prefer not to enlarge the shmem inode just for that.
86 */ 86 */
87struct shmem_falloc { 87struct shmem_falloc {
88 int mode; /* FALLOC_FL mode currently operating */ 88 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
89 pgoff_t start; /* start of range currently being fallocated */ 89 pgoff_t start; /* start of range currently being fallocated */
90 pgoff_t next; /* the next page offset to be fallocated */ 90 pgoff_t next; /* the next page offset to be fallocated */
91 pgoff_t nr_falloced; /* how many new pages have been fallocated */ 91 pgoff_t nr_falloced; /* how many new pages have been fallocated */
@@ -468,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
468 return; 468 return;
469 469
470 index = start; 470 index = start;
471 for ( ; ; ) { 471 while (index < end) {
472 cond_resched(); 472 cond_resched();
473 473
474 pvec.nr = find_get_entries(mapping, index, 474 pvec.nr = find_get_entries(mapping, index,
475 min(end - index, (pgoff_t)PAGEVEC_SIZE), 475 min(end - index, (pgoff_t)PAGEVEC_SIZE),
476 pvec.pages, indices); 476 pvec.pages, indices);
477 if (!pvec.nr) { 477 if (!pvec.nr) {
478 if (index == start || unfalloc) 478 /* If all gone or hole-punch or unfalloc, we're done */
479 if (index == start || end != -1)
479 break; 480 break;
481 /* But if truncating, restart to make sure all gone */
480 index = start; 482 index = start;
481 continue; 483 continue;
482 } 484 }
483 if ((index == start || unfalloc) && indices[0] >= end) {
484 pagevec_remove_exceptionals(&pvec);
485 pagevec_release(&pvec);
486 break;
487 }
488 mem_cgroup_uncharge_start(); 485 mem_cgroup_uncharge_start();
489 for (i = 0; i < pagevec_count(&pvec); i++) { 486 for (i = 0; i < pagevec_count(&pvec); i++) {
490 struct page *page = pvec.pages[i]; 487 struct page *page = pvec.pages[i];
@@ -496,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
496 if (radix_tree_exceptional_entry(page)) { 493 if (radix_tree_exceptional_entry(page)) {
497 if (unfalloc) 494 if (unfalloc)
498 continue; 495 continue;
499 nr_swaps_freed += !shmem_free_swap(mapping, 496 if (shmem_free_swap(mapping, index, page)) {
500 index, page); 497 /* Swap was replaced by page: retry */
498 index--;
499 break;
500 }
501 nr_swaps_freed++;
501 continue; 502 continue;
502 } 503 }
503 504
@@ -506,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
506 if (page->mapping == mapping) { 507 if (page->mapping == mapping) {
507 VM_BUG_ON_PAGE(PageWriteback(page), page); 508 VM_BUG_ON_PAGE(PageWriteback(page), page);
508 truncate_inode_page(mapping, page); 509 truncate_inode_page(mapping, page);
510 } else {
511 /* Page was replaced by swap: retry */
512 unlock_page(page);
513 index--;
514 break;
509 } 515 }
510 } 516 }
511 unlock_page(page); 517 unlock_page(page);
@@ -760,7 +766,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
760 spin_lock(&inode->i_lock); 766 spin_lock(&inode->i_lock);
761 shmem_falloc = inode->i_private; 767 shmem_falloc = inode->i_private;
762 if (shmem_falloc && 768 if (shmem_falloc &&
763 !shmem_falloc->mode && 769 !shmem_falloc->waitq &&
764 index >= shmem_falloc->start && 770 index >= shmem_falloc->start &&
765 index < shmem_falloc->next) 771 index < shmem_falloc->next)
766 shmem_falloc->nr_unswapped++; 772 shmem_falloc->nr_unswapped++;
@@ -1248,38 +1254,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1248 * Trinity finds that probing a hole which tmpfs is punching can 1254 * Trinity finds that probing a hole which tmpfs is punching can
1249 * prevent the hole-punch from ever completing: which in turn 1255 * prevent the hole-punch from ever completing: which in turn
1250 * locks writers out with its hold on i_mutex. So refrain from 1256 * locks writers out with its hold on i_mutex. So refrain from
1251 * faulting pages into the hole while it's being punched, and 1257 * faulting pages into the hole while it's being punched. Although
1252 * wait on i_mutex to be released if vmf->flags permits. 1258 * shmem_undo_range() does remove the additions, it may be unable to
1259 * keep up, as each new page needs its own unmap_mapping_range() call,
1260 * and the i_mmap tree grows ever slower to scan if new vmas are added.
1261 *
1262 * It does not matter if we sometimes reach this check just before the
1263 * hole-punch begins, so that one fault then races with the punch:
1264 * we just need to make racing faults a rare case.
1265 *
1266 * The implementation below would be much simpler if we just used a
1267 * standard mutex or completion: but we cannot take i_mutex in fault,
1268 * and bloating every shmem inode for this unlikely case would be sad.
1253 */ 1269 */
1254 if (unlikely(inode->i_private)) { 1270 if (unlikely(inode->i_private)) {
1255 struct shmem_falloc *shmem_falloc; 1271 struct shmem_falloc *shmem_falloc;
1256 1272
1257 spin_lock(&inode->i_lock); 1273 spin_lock(&inode->i_lock);
1258 shmem_falloc = inode->i_private; 1274 shmem_falloc = inode->i_private;
1259 if (!shmem_falloc || 1275 if (shmem_falloc &&
1260 shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE || 1276 shmem_falloc->waitq &&
1261 vmf->pgoff < shmem_falloc->start || 1277 vmf->pgoff >= shmem_falloc->start &&
1262 vmf->pgoff >= shmem_falloc->next) 1278 vmf->pgoff < shmem_falloc->next) {
1263 shmem_falloc = NULL; 1279 wait_queue_head_t *shmem_falloc_waitq;
1264 spin_unlock(&inode->i_lock); 1280 DEFINE_WAIT(shmem_fault_wait);
1265 /* 1281
1266 * i_lock has protected us from taking shmem_falloc seriously 1282 ret = VM_FAULT_NOPAGE;
1267 * once return from shmem_fallocate() went back up that stack.
1268 * i_lock does not serialize with i_mutex at all, but it does
1269 * not matter if sometimes we wait unnecessarily, or sometimes
1270 * miss out on waiting: we just need to make those cases rare.
1271 */
1272 if (shmem_falloc) {
1273 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && 1283 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1274 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { 1284 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1285 /* It's polite to up mmap_sem if we can */
1275 up_read(&vma->vm_mm->mmap_sem); 1286 up_read(&vma->vm_mm->mmap_sem);
1276 mutex_lock(&inode->i_mutex); 1287 ret = VM_FAULT_RETRY;
1277 mutex_unlock(&inode->i_mutex);
1278 return VM_FAULT_RETRY;
1279 } 1288 }
1280 /* cond_resched? Leave that to GUP or return to user */ 1289
1281 return VM_FAULT_NOPAGE; 1290 shmem_falloc_waitq = shmem_falloc->waitq;
1291 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1292 TASK_UNINTERRUPTIBLE);
1293 spin_unlock(&inode->i_lock);
1294 schedule();
1295
1296 /*
1297 * shmem_falloc_waitq points into the shmem_fallocate()
1298 * stack of the hole-punching task: shmem_falloc_waitq
1299 * is usually invalid by the time we reach here, but
1300 * finish_wait() does not dereference it in that case;
1301 * though i_lock needed lest racing with wake_up_all().
1302 */
1303 spin_lock(&inode->i_lock);
1304 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1305 spin_unlock(&inode->i_lock);
1306 return ret;
1282 } 1307 }
1308 spin_unlock(&inode->i_lock);
1283 } 1309 }
1284 1310
1285 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); 1311 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
@@ -1774,13 +1800,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1774 1800
1775 mutex_lock(&inode->i_mutex); 1801 mutex_lock(&inode->i_mutex);
1776 1802
1777 shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
1778
1779 if (mode & FALLOC_FL_PUNCH_HOLE) { 1803 if (mode & FALLOC_FL_PUNCH_HOLE) {
1780 struct address_space *mapping = file->f_mapping; 1804 struct address_space *mapping = file->f_mapping;
1781 loff_t unmap_start = round_up(offset, PAGE_SIZE); 1805 loff_t unmap_start = round_up(offset, PAGE_SIZE);
1782 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 1806 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
1807 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
1783 1808
1809 shmem_falloc.waitq = &shmem_falloc_waitq;
1784 shmem_falloc.start = unmap_start >> PAGE_SHIFT; 1810 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
1785 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 1811 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
1786 spin_lock(&inode->i_lock); 1812 spin_lock(&inode->i_lock);
@@ -1792,8 +1818,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1792 1 + unmap_end - unmap_start, 0); 1818 1 + unmap_end - unmap_start, 0);
1793 shmem_truncate_range(inode, offset, offset + len - 1); 1819 shmem_truncate_range(inode, offset, offset + len - 1);
1794 /* No need to unmap again: hole-punching leaves COWed pages */ 1820 /* No need to unmap again: hole-punching leaves COWed pages */
1821
1822 spin_lock(&inode->i_lock);
1823 inode->i_private = NULL;
1824 wake_up_all(&shmem_falloc_waitq);
1825 spin_unlock(&inode->i_lock);
1795 error = 0; 1826 error = 0;
1796 goto undone; 1827 goto out;
1797 } 1828 }
1798 1829
1799 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 1830 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1809,6 +1840,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1809 goto out; 1840 goto out;
1810 } 1841 }
1811 1842
1843 shmem_falloc.waitq = NULL;
1812 shmem_falloc.start = start; 1844 shmem_falloc.start = start;
1813 shmem_falloc.next = start; 1845 shmem_falloc.next = start;
1814 shmem_falloc.nr_falloced = 0; 1846 shmem_falloc.nr_falloced = 0;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 735e01a0db6f..d31c4bacc6a2 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -55,7 +55,7 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
55 continue; 55 continue;
56 } 56 }
57 57
58#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON) 58#if !defined(CONFIG_SLUB)
59 if (!strcmp(s->name, name)) { 59 if (!strcmp(s->name, name)) {
60 pr_err("%s (%s): Cache name already exists.\n", 60 pr_err("%s (%s): Cache name already exists.\n",
61 __func__, name); 61 __func__, name);
diff --git a/mm/truncate.c b/mm/truncate.c
index 6a78c814bebf..eda247307164 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -355,14 +355,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
355 for ( ; ; ) { 355 for ( ; ; ) {
356 cond_resched(); 356 cond_resched();
357 if (!pagevec_lookup_entries(&pvec, mapping, index, 357 if (!pagevec_lookup_entries(&pvec, mapping, index,
358 min(end - index, (pgoff_t)PAGEVEC_SIZE), 358 min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
359 indices)) { 359 /* If all gone from start onwards, we're done */
360 if (index == start) 360 if (index == start)
361 break; 361 break;
362 /* Otherwise restart to make sure all gone */
362 index = start; 363 index = start;
363 continue; 364 continue;
364 } 365 }
365 if (index == start && indices[0] >= end) { 366 if (index == start && indices[0] >= end) {
367 /* All gone out of hole to be punched, we're done */
366 pagevec_remove_exceptionals(&pvec); 368 pagevec_remove_exceptionals(&pvec);
367 pagevec_release(&pvec); 369 pagevec_release(&pvec);
368 break; 370 break;
@@ -373,8 +375,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
373 375
374 /* We rely upon deletion not changing page->index */ 376 /* We rely upon deletion not changing page->index */
375 index = indices[i]; 377 index = indices[i];
376 if (index >= end) 378 if (index >= end) {
379 /* Restart punch to make sure all gone */
380 index = start - 1;
377 break; 381 break;
382 }
378 383
379 if (radix_tree_exceptional_entry(page)) { 384 if (radix_tree_exceptional_entry(page)) {
380 clear_exceptional_entry(mapping, index, page); 385 clear_exceptional_entry(mapping, index, page);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ad2ac3c00398..dd11f612e03e 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -627,8 +627,6 @@ static void vlan_dev_uninit(struct net_device *dev)
627 struct vlan_dev_priv *vlan = vlan_dev_priv(dev); 627 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
628 int i; 628 int i;
629 629
630 free_percpu(vlan->vlan_pcpu_stats);
631 vlan->vlan_pcpu_stats = NULL;
632 for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { 630 for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
633 while ((pm = vlan->egress_priority_map[i]) != NULL) { 631 while ((pm = vlan->egress_priority_map[i]) != NULL) {
634 vlan->egress_priority_map[i] = pm->next; 632 vlan->egress_priority_map[i] = pm->next;
@@ -785,6 +783,15 @@ static const struct net_device_ops vlan_netdev_ops = {
785 .ndo_get_lock_subclass = vlan_dev_get_lock_subclass, 783 .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
786}; 784};
787 785
786static void vlan_dev_free(struct net_device *dev)
787{
788 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
789
790 free_percpu(vlan->vlan_pcpu_stats);
791 vlan->vlan_pcpu_stats = NULL;
792 free_netdev(dev);
793}
794
788void vlan_setup(struct net_device *dev) 795void vlan_setup(struct net_device *dev)
789{ 796{
790 ether_setup(dev); 797 ether_setup(dev);
@@ -794,7 +801,7 @@ void vlan_setup(struct net_device *dev)
794 dev->tx_queue_len = 0; 801 dev->tx_queue_len = 0;
795 802
796 dev->netdev_ops = &vlan_netdev_ops; 803 dev->netdev_ops = &vlan_netdev_ops;
797 dev->destructor = free_netdev; 804 dev->destructor = vlan_dev_free;
798 dev->ethtool_ops = &vlan_ethtool_ops; 805 dev->ethtool_ops = &vlan_ethtool_ops;
799 806
800 memset(dev->broadcast, 0, ETH_ALEN); 807 memset(dev->broadcast, 0, ETH_ALEN);
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 01a1082e02b3..bfcf6be1d665 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1489 goto drop; 1489 goto drop;
1490 1490
1491 /* Queue packet (standard) */ 1491 /* Queue packet (standard) */
1492 skb->sk = sock;
1493
1494 if (sock_queue_rcv_skb(sock, skb) < 0) 1492 if (sock_queue_rcv_skb(sock, skb) < 0)
1495 goto drop; 1493 goto drop;
1496 1494
@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1644 if (!skb) 1642 if (!skb)
1645 goto out; 1643 goto out;
1646 1644
1647 skb->sk = sk;
1648 skb_reserve(skb, ddp_dl->header_length); 1645 skb_reserve(skb, ddp_dl->header_length);
1649 skb_reserve(skb, dev->hard_header_len); 1646 skb_reserve(skb, dev->hard_header_len);
1650 skb->dev = dev; 1647 skb->dev = dev;
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 6f0d9ec37950..a957c8140721 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -800,11 +800,6 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
800 bla_dst = (struct batadv_bla_claim_dst *)hw_dst; 800 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
801 bla_dst_own = &bat_priv->bla.claim_dest; 801 bla_dst_own = &bat_priv->bla.claim_dest;
802 802
803 /* check if it is a claim packet in general */
804 if (memcmp(bla_dst->magic, bla_dst_own->magic,
805 sizeof(bla_dst->magic)) != 0)
806 return 0;
807
808 /* if announcement packet, use the source, 803 /* if announcement packet, use the source,
809 * otherwise assume it is in the hw_src 804 * otherwise assume it is in the hw_src
810 */ 805 */
@@ -866,12 +861,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
866 struct batadv_hard_iface *primary_if, 861 struct batadv_hard_iface *primary_if,
867 struct sk_buff *skb) 862 struct sk_buff *skb)
868{ 863{
869 struct batadv_bla_claim_dst *bla_dst; 864 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
870 uint8_t *hw_src, *hw_dst; 865 uint8_t *hw_src, *hw_dst;
871 struct vlan_ethhdr *vhdr; 866 struct vlan_hdr *vhdr, vhdr_buf;
872 struct ethhdr *ethhdr; 867 struct ethhdr *ethhdr;
873 struct arphdr *arphdr; 868 struct arphdr *arphdr;
874 unsigned short vid; 869 unsigned short vid;
870 int vlan_depth = 0;
875 __be16 proto; 871 __be16 proto;
876 int headlen; 872 int headlen;
877 int ret; 873 int ret;
@@ -882,9 +878,24 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
882 proto = ethhdr->h_proto; 878 proto = ethhdr->h_proto;
883 headlen = ETH_HLEN; 879 headlen = ETH_HLEN;
884 if (vid & BATADV_VLAN_HAS_TAG) { 880 if (vid & BATADV_VLAN_HAS_TAG) {
885 vhdr = vlan_eth_hdr(skb); 881 /* Traverse the VLAN/Ethertypes.
886 proto = vhdr->h_vlan_encapsulated_proto; 882 *
887 headlen += VLAN_HLEN; 883 * At this point it is known that the first protocol is a VLAN
884 * header, so start checking at the encapsulated protocol.
885 *
886 * The depth of the VLAN headers is recorded to drop BLA claim
887 * frames encapsulated into multiple VLAN headers (QinQ).
888 */
889 do {
890 vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
891 &vhdr_buf);
892 if (!vhdr)
893 return 0;
894
895 proto = vhdr->h_vlan_encapsulated_proto;
896 headlen += VLAN_HLEN;
897 vlan_depth++;
898 } while (proto == htons(ETH_P_8021Q));
888 } 899 }
889 900
890 if (proto != htons(ETH_P_ARP)) 901 if (proto != htons(ETH_P_ARP))
@@ -914,6 +925,19 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
914 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr); 925 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
915 hw_dst = hw_src + ETH_ALEN + 4; 926 hw_dst = hw_src + ETH_ALEN + 4;
916 bla_dst = (struct batadv_bla_claim_dst *)hw_dst; 927 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
928 bla_dst_own = &bat_priv->bla.claim_dest;
929
930 /* check if it is a claim frame in general */
931 if (memcmp(bla_dst->magic, bla_dst_own->magic,
932 sizeof(bla_dst->magic)) != 0)
933 return 0;
934
935 /* check if there is a claim frame encapsulated deeper in (QinQ) and
936 * drop that, as this is not supported by BLA but should also not be
937 * sent via the mesh.
938 */
939 if (vlan_depth > 1)
940 return 1;
917 941
918 /* check if it is a claim frame. */ 942 /* check if it is a claim frame. */
919 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst, 943 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index e7ee65dc20bf..cbd677f48c00 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -448,10 +448,15 @@ out:
448 * possibly free it 448 * possibly free it
449 * @softif_vlan: the vlan object to release 449 * @softif_vlan: the vlan object to release
450 */ 450 */
451void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan) 451void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
452{ 452{
453 if (atomic_dec_and_test(&softif_vlan->refcount)) 453 if (atomic_dec_and_test(&vlan->refcount)) {
454 kfree_rcu(softif_vlan, rcu); 454 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
455 hlist_del_rcu(&vlan->list);
456 spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
457
458 kfree_rcu(vlan, rcu);
459 }
455} 460}
456 461
457/** 462/**
@@ -505,6 +510,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
505 if (!vlan) 510 if (!vlan)
506 return -ENOMEM; 511 return -ENOMEM;
507 512
513 vlan->bat_priv = bat_priv;
508 vlan->vid = vid; 514 vlan->vid = vid;
509 atomic_set(&vlan->refcount, 1); 515 atomic_set(&vlan->refcount, 1);
510 516
@@ -516,6 +522,10 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
516 return err; 522 return err;
517 } 523 }
518 524
525 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
526 hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
527 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
528
519 /* add a new TT local entry. This one will be marked with the NOPURGE 529 /* add a new TT local entry. This one will be marked with the NOPURGE
520 * flag 530 * flag
521 */ 531 */
@@ -523,10 +533,6 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
523 bat_priv->soft_iface->dev_addr, vid, 533 bat_priv->soft_iface->dev_addr, vid,
524 BATADV_NULL_IFINDEX, BATADV_NO_MARK); 534 BATADV_NULL_IFINDEX, BATADV_NO_MARK);
525 535
526 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
527 hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
528 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
529
530 return 0; 536 return 0;
531} 537}
532 538
@@ -538,18 +544,13 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
538static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv, 544static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
539 struct batadv_softif_vlan *vlan) 545 struct batadv_softif_vlan *vlan)
540{ 546{
541 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
542 hlist_del_rcu(&vlan->list);
543 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
544
545 batadv_sysfs_del_vlan(bat_priv, vlan);
546
547 /* explicitly remove the associated TT local entry because it is marked 547 /* explicitly remove the associated TT local entry because it is marked
548 * with the NOPURGE flag 548 * with the NOPURGE flag
549 */ 549 */
550 batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr, 550 batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
551 vlan->vid, "vlan interface destroyed", false); 551 vlan->vid, "vlan interface destroyed", false);
552 552
553 batadv_sysfs_del_vlan(bat_priv, vlan);
553 batadv_softif_vlan_free_ref(vlan); 554 batadv_softif_vlan_free_ref(vlan);
554} 555}
555 556
@@ -567,6 +568,8 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
567 unsigned short vid) 568 unsigned short vid)
568{ 569{
569 struct batadv_priv *bat_priv = netdev_priv(dev); 570 struct batadv_priv *bat_priv = netdev_priv(dev);
571 struct batadv_softif_vlan *vlan;
572 int ret;
570 573
571 /* only 802.1Q vlans are supported. 574 /* only 802.1Q vlans are supported.
572 * batman-adv does not know how to handle other types 575 * batman-adv does not know how to handle other types
@@ -576,7 +579,36 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
576 579
577 vid |= BATADV_VLAN_HAS_TAG; 580 vid |= BATADV_VLAN_HAS_TAG;
578 581
579 return batadv_softif_create_vlan(bat_priv, vid); 582 /* if a new vlan is getting created and it already exists, it means that
583 * it was not deleted yet. batadv_softif_vlan_get() increases the
584 * refcount in order to revive the object.
585 *
586 * if it does not exist then create it.
587 */
588 vlan = batadv_softif_vlan_get(bat_priv, vid);
589 if (!vlan)
590 return batadv_softif_create_vlan(bat_priv, vid);
591
592 /* recreate the sysfs object if it was already destroyed (and it should
593 * be since we received a kill_vid() for this vlan
594 */
595 if (!vlan->kobj) {
596 ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
597 if (ret) {
598 batadv_softif_vlan_free_ref(vlan);
599 return ret;
600 }
601 }
602
603 /* add a new TT local entry. This one will be marked with the NOPURGE
604 * flag. This must be added again, even if the vlan object already
605 * exists, because the entry was deleted by kill_vid()
606 */
607 batadv_tt_local_add(bat_priv->soft_iface,
608 bat_priv->soft_iface->dev_addr, vid,
609 BATADV_NULL_IFINDEX, BATADV_NO_MARK);
610
611 return 0;
580} 612}
581 613
582/** 614/**
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d636bde72c9a..5f59e7f899a0 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -511,6 +511,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
511 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 511 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
512 struct batadv_tt_local_entry *tt_local; 512 struct batadv_tt_local_entry *tt_local;
513 struct batadv_tt_global_entry *tt_global = NULL; 513 struct batadv_tt_global_entry *tt_global = NULL;
514 struct batadv_softif_vlan *vlan;
514 struct net_device *in_dev = NULL; 515 struct net_device *in_dev = NULL;
515 struct hlist_head *head; 516 struct hlist_head *head;
516 struct batadv_tt_orig_list_entry *orig_entry; 517 struct batadv_tt_orig_list_entry *orig_entry;
@@ -572,6 +573,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
572 if (!tt_local) 573 if (!tt_local)
573 goto out; 574 goto out;
574 575
576 /* increase the refcounter of the related vlan */
577 vlan = batadv_softif_vlan_get(bat_priv, vid);
578
575 batadv_dbg(BATADV_DBG_TT, bat_priv, 579 batadv_dbg(BATADV_DBG_TT, bat_priv,
576 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", 580 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
577 addr, BATADV_PRINT_VID(vid), 581 addr, BATADV_PRINT_VID(vid),
@@ -604,6 +608,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
604 if (unlikely(hash_added != 0)) { 608 if (unlikely(hash_added != 0)) {
605 /* remove the reference for the hash */ 609 /* remove the reference for the hash */
606 batadv_tt_local_entry_free_ref(tt_local); 610 batadv_tt_local_entry_free_ref(tt_local);
611 batadv_softif_vlan_free_ref(vlan);
607 goto out; 612 goto out;
608 } 613 }
609 614
@@ -1009,6 +1014,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1009{ 1014{
1010 struct batadv_tt_local_entry *tt_local_entry; 1015 struct batadv_tt_local_entry *tt_local_entry;
1011 uint16_t flags, curr_flags = BATADV_NO_FLAGS; 1016 uint16_t flags, curr_flags = BATADV_NO_FLAGS;
1017 struct batadv_softif_vlan *vlan;
1012 1018
1013 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); 1019 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
1014 if (!tt_local_entry) 1020 if (!tt_local_entry)
@@ -1039,6 +1045,11 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1039 hlist_del_rcu(&tt_local_entry->common.hash_entry); 1045 hlist_del_rcu(&tt_local_entry->common.hash_entry);
1040 batadv_tt_local_entry_free_ref(tt_local_entry); 1046 batadv_tt_local_entry_free_ref(tt_local_entry);
1041 1047
1048 /* decrease the reference held for this vlan */
1049 vlan = batadv_softif_vlan_get(bat_priv, vid);
1050 batadv_softif_vlan_free_ref(vlan);
1051 batadv_softif_vlan_free_ref(vlan);
1052
1042out: 1053out:
1043 if (tt_local_entry) 1054 if (tt_local_entry)
1044 batadv_tt_local_entry_free_ref(tt_local_entry); 1055 batadv_tt_local_entry_free_ref(tt_local_entry);
@@ -1111,6 +1122,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
1111 spinlock_t *list_lock; /* protects write access to the hash lists */ 1122 spinlock_t *list_lock; /* protects write access to the hash lists */
1112 struct batadv_tt_common_entry *tt_common_entry; 1123 struct batadv_tt_common_entry *tt_common_entry;
1113 struct batadv_tt_local_entry *tt_local; 1124 struct batadv_tt_local_entry *tt_local;
1125 struct batadv_softif_vlan *vlan;
1114 struct hlist_node *node_tmp; 1126 struct hlist_node *node_tmp;
1115 struct hlist_head *head; 1127 struct hlist_head *head;
1116 uint32_t i; 1128 uint32_t i;
@@ -1131,6 +1143,13 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
1131 tt_local = container_of(tt_common_entry, 1143 tt_local = container_of(tt_common_entry,
1132 struct batadv_tt_local_entry, 1144 struct batadv_tt_local_entry,
1133 common); 1145 common);
1146
1147 /* decrease the reference held for this vlan */
1148 vlan = batadv_softif_vlan_get(bat_priv,
1149 tt_common_entry->vid);
1150 batadv_softif_vlan_free_ref(vlan);
1151 batadv_softif_vlan_free_ref(vlan);
1152
1134 batadv_tt_local_entry_free_ref(tt_local); 1153 batadv_tt_local_entry_free_ref(tt_local);
1135 } 1154 }
1136 spin_unlock_bh(list_lock); 1155 spin_unlock_bh(list_lock);
@@ -3139,6 +3158,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
3139 struct batadv_hashtable *hash = bat_priv->tt.local_hash; 3158 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
3140 struct batadv_tt_common_entry *tt_common; 3159 struct batadv_tt_common_entry *tt_common;
3141 struct batadv_tt_local_entry *tt_local; 3160 struct batadv_tt_local_entry *tt_local;
3161 struct batadv_softif_vlan *vlan;
3142 struct hlist_node *node_tmp; 3162 struct hlist_node *node_tmp;
3143 struct hlist_head *head; 3163 struct hlist_head *head;
3144 spinlock_t *list_lock; /* protects write access to the hash lists */ 3164 spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3167,6 +3187,12 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
3167 tt_local = container_of(tt_common, 3187 tt_local = container_of(tt_common,
3168 struct batadv_tt_local_entry, 3188 struct batadv_tt_local_entry,
3169 common); 3189 common);
3190
3191 /* decrease the reference held for this vlan */
3192 vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
3193 batadv_softif_vlan_free_ref(vlan);
3194 batadv_softif_vlan_free_ref(vlan);
3195
3170 batadv_tt_local_entry_free_ref(tt_local); 3196 batadv_tt_local_entry_free_ref(tt_local);
3171 } 3197 }
3172 spin_unlock_bh(list_lock); 3198 spin_unlock_bh(list_lock);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 34891a56773f..8854c05622a9 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -687,6 +687,7 @@ struct batadv_priv_nc {
687 687
688/** 688/**
689 * struct batadv_softif_vlan - per VLAN attributes set 689 * struct batadv_softif_vlan - per VLAN attributes set
690 * @bat_priv: pointer to the mesh object
690 * @vid: VLAN identifier 691 * @vid: VLAN identifier
691 * @kobj: kobject for sysfs vlan subdirectory 692 * @kobj: kobject for sysfs vlan subdirectory
692 * @ap_isolation: AP isolation state 693 * @ap_isolation: AP isolation state
@@ -696,6 +697,7 @@ struct batadv_priv_nc {
696 * @rcu: struct used for freeing in a RCU-safe manner 697 * @rcu: struct used for freeing in a RCU-safe manner
697 */ 698 */
698struct batadv_softif_vlan { 699struct batadv_softif_vlan {
700 struct batadv_priv *bat_priv;
699 unsigned short vid; 701 unsigned short vid;
700 struct kobject *kobj; 702 struct kobject *kobj;
701 atomic_t ap_isolation; /* boolean */ 703 atomic_t ap_isolation; /* boolean */
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ca01d1861854..a7a27bc2c0b1 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -289,10 +289,20 @@ static void hci_conn_timeout(struct work_struct *work)
289{ 289{
290 struct hci_conn *conn = container_of(work, struct hci_conn, 290 struct hci_conn *conn = container_of(work, struct hci_conn,
291 disc_work.work); 291 disc_work.work);
292 int refcnt = atomic_read(&conn->refcnt);
292 293
293 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state)); 294 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
294 295
295 if (atomic_read(&conn->refcnt)) 296 WARN_ON(refcnt < 0);
297
298 /* FIXME: It was observed that in pairing failed scenario, refcnt
299 * drops below 0. Probably this is because l2cap_conn_del calls
300 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
301 * dropped. After that loop hci_chan_del is called which also drops
 302 * conn. For now make sure that ACL is alive if refcnt is higher than 0,
303 * otherwise drop it.
304 */
305 if (refcnt > 0)
296 return; 306 return;
297 307
298 switch (conn->state) { 308 switch (conn->state) {
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index f2829a7932e2..e33a982161c1 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -385,6 +385,16 @@ static const u8 gen_method[5][5] = {
385 { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP }, 385 { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP },
386}; 386};
387 387
388static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
389{
390 /* If either side has unknown io_caps, use JUST WORKS */
391 if (local_io > SMP_IO_KEYBOARD_DISPLAY ||
392 remote_io > SMP_IO_KEYBOARD_DISPLAY)
393 return JUST_WORKS;
394
395 return gen_method[remote_io][local_io];
396}
397
388static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, 398static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
389 u8 local_io, u8 remote_io) 399 u8 local_io, u8 remote_io)
390{ 400{
@@ -401,14 +411,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
401 BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io); 411 BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
402 412
403 /* If neither side wants MITM, use JUST WORKS */ 413 /* If neither side wants MITM, use JUST WORKS */
404 /* If either side has unknown io_caps, use JUST WORKS */
405 /* Otherwise, look up method from the table */ 414 /* Otherwise, look up method from the table */
406 if (!(auth & SMP_AUTH_MITM) || 415 if (!(auth & SMP_AUTH_MITM))
407 local_io > SMP_IO_KEYBOARD_DISPLAY ||
408 remote_io > SMP_IO_KEYBOARD_DISPLAY)
409 method = JUST_WORKS; 416 method = JUST_WORKS;
410 else 417 else
411 method = gen_method[remote_io][local_io]; 418 method = get_auth_method(smp, local_io, remote_io);
412 419
413 /* If not bonding, don't ask user to confirm a Zero TK */ 420 /* If not bonding, don't ask user to confirm a Zero TK */
414 if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM) 421 if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
@@ -669,7 +676,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
669{ 676{
670 struct smp_cmd_pairing rsp, *req = (void *) skb->data; 677 struct smp_cmd_pairing rsp, *req = (void *) skb->data;
671 struct smp_chan *smp; 678 struct smp_chan *smp;
672 u8 key_size, auth; 679 u8 key_size, auth, sec_level;
673 int ret; 680 int ret;
674 681
675 BT_DBG("conn %p", conn); 682 BT_DBG("conn %p", conn);
@@ -695,7 +702,19 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
695 /* We didn't start the pairing, so match remote */ 702 /* We didn't start the pairing, so match remote */
696 auth = req->auth_req; 703 auth = req->auth_req;
697 704
698 conn->hcon->pending_sec_level = authreq_to_seclevel(auth); 705 sec_level = authreq_to_seclevel(auth);
706 if (sec_level > conn->hcon->pending_sec_level)
707 conn->hcon->pending_sec_level = sec_level;
708
 709 /* If we need MITM, check that it can be achieved */
710 if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
711 u8 method;
712
713 method = get_auth_method(smp, conn->hcon->io_capability,
714 req->io_capability);
715 if (method == JUST_WORKS || method == JUST_CFM)
716 return SMP_AUTH_REQUIREMENTS;
717 }
699 718
700 build_pairing_cmd(conn, req, &rsp, auth); 719 build_pairing_cmd(conn, req, &rsp, auth);
701 720
@@ -743,6 +762,16 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
743 if (check_enc_key_size(conn, key_size)) 762 if (check_enc_key_size(conn, key_size))
744 return SMP_ENC_KEY_SIZE; 763 return SMP_ENC_KEY_SIZE;
745 764
 765 /* If we need MITM, check that it can be achieved */
766 if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
767 u8 method;
768
769 method = get_auth_method(smp, req->io_capability,
770 rsp->io_capability);
771 if (method == JUST_WORKS || method == JUST_CFM)
772 return SMP_AUTH_REQUIREMENTS;
773 }
774
746 get_random_bytes(smp->prnd, sizeof(smp->prnd)); 775 get_random_bytes(smp->prnd, sizeof(smp->prnd));
747 776
748 smp->prsp[0] = SMP_CMD_PAIRING_RSP; 777 smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -838,6 +867,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
838 struct smp_cmd_pairing cp; 867 struct smp_cmd_pairing cp;
839 struct hci_conn *hcon = conn->hcon; 868 struct hci_conn *hcon = conn->hcon;
840 struct smp_chan *smp; 869 struct smp_chan *smp;
870 u8 sec_level;
841 871
842 BT_DBG("conn %p", conn); 872 BT_DBG("conn %p", conn);
843 873
@@ -847,7 +877,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
847 if (!(conn->hcon->link_mode & HCI_LM_MASTER)) 877 if (!(conn->hcon->link_mode & HCI_LM_MASTER))
848 return SMP_CMD_NOTSUPP; 878 return SMP_CMD_NOTSUPP;
849 879
850 hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req); 880 sec_level = authreq_to_seclevel(rp->auth_req);
881 if (sec_level > hcon->pending_sec_level)
882 hcon->pending_sec_level = sec_level;
851 883
852 if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) 884 if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
853 return 0; 885 return 0;
@@ -901,9 +933,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
901 if (smp_sufficient_security(hcon, sec_level)) 933 if (smp_sufficient_security(hcon, sec_level))
902 return 1; 934 return 1;
903 935
936 if (sec_level > hcon->pending_sec_level)
937 hcon->pending_sec_level = sec_level;
938
904 if (hcon->link_mode & HCI_LM_MASTER) 939 if (hcon->link_mode & HCI_LM_MASTER)
905 if (smp_ltk_encrypt(conn, sec_level)) 940 if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
906 goto done; 941 return 0;
907 942
908 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) 943 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
909 return 0; 944 return 0;
@@ -918,7 +953,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
918 * requires it. 953 * requires it.
919 */ 954 */
920 if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT || 955 if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
921 sec_level > BT_SECURITY_MEDIUM) 956 hcon->pending_sec_level > BT_SECURITY_MEDIUM)
922 authreq |= SMP_AUTH_MITM; 957 authreq |= SMP_AUTH_MITM;
923 958
924 if (hcon->link_mode & HCI_LM_MASTER) { 959 if (hcon->link_mode & HCI_LM_MASTER) {
@@ -937,9 +972,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
937 972
938 set_bit(SMP_FLAG_INITIATOR, &smp->flags); 973 set_bit(SMP_FLAG_INITIATOR, &smp->flags);
939 974
940done:
941 hcon->pending_sec_level = sec_level;
942
943 return 0; 975 return 0;
944} 976}
945 977
diff --git a/net/compat.c b/net/compat.c
index 9a76eaf63184..bc8aeefddf3f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
85{ 85{
86 int tot_len; 86 int tot_len;
87 87
88 if (kern_msg->msg_namelen) { 88 if (kern_msg->msg_name && kern_msg->msg_namelen) {
89 if (mode == VERIFY_READ) { 89 if (mode == VERIFY_READ) {
90 int err = move_addr_to_kernel(kern_msg->msg_name, 90 int err = move_addr_to_kernel(kern_msg->msg_name,
91 kern_msg->msg_namelen, 91 kern_msg->msg_namelen,
@@ -93,10 +93,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
93 if (err < 0) 93 if (err < 0)
94 return err; 94 return err;
95 } 95 }
96 if (kern_msg->msg_name) 96 kern_msg->msg_name = kern_address;
97 kern_msg->msg_name = kern_address; 97 } else {
98 } else
99 kern_msg->msg_name = NULL; 98 kern_msg->msg_name = NULL;
99 kern_msg->msg_namelen = 0;
100 }
100 101
101 tot_len = iov_from_user_compat_to_kern(kern_iov, 102 tot_len = iov_from_user_compat_to_kern(kern_iov,
102 (struct compat_iovec __user *)kern_msg->msg_iov, 103 (struct compat_iovec __user *)kern_msg->msg_iov,
diff --git a/net/core/dev.c b/net/core/dev.c
index 30eedf677913..367a586d0c8a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly; /* Taps */
148static struct list_head offload_base __read_mostly; 148static struct list_head offload_base __read_mostly;
149 149
150static int netif_rx_internal(struct sk_buff *skb); 150static int netif_rx_internal(struct sk_buff *skb);
151static int call_netdevice_notifiers_info(unsigned long val,
152 struct net_device *dev,
153 struct netdev_notifier_info *info);
151 154
152/* 155/*
153 * The @dev_base_head list is protected by @dev_base_lock and the rtnl 156 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -1207,7 +1210,11 @@ EXPORT_SYMBOL(netdev_features_change);
1207void netdev_state_change(struct net_device *dev) 1210void netdev_state_change(struct net_device *dev)
1208{ 1211{
1209 if (dev->flags & IFF_UP) { 1212 if (dev->flags & IFF_UP) {
1210 call_netdevice_notifiers(NETDEV_CHANGE, dev); 1213 struct netdev_notifier_change_info change_info;
1214
1215 change_info.flags_changed = 0;
1216 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1217 &change_info.info);
1211 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); 1218 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1212 } 1219 }
1213} 1220}
@@ -4089,6 +4096,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4089 skb->vlan_tci = 0; 4096 skb->vlan_tci = 0;
4090 skb->dev = napi->dev; 4097 skb->dev = napi->dev;
4091 skb->skb_iif = 0; 4098 skb->skb_iif = 0;
4099 skb->encapsulation = 0;
4100 skb_shinfo(skb)->gso_type = 0;
4092 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 4101 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4093 4102
4094 napi->skb = skb; 4103 napi->skb = skb;
@@ -4227,9 +4236,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
4227#endif 4236#endif
4228 napi->weight = weight_p; 4237 napi->weight = weight_p;
4229 local_irq_disable(); 4238 local_irq_disable();
4230 while (work < quota) { 4239 while (1) {
4231 struct sk_buff *skb; 4240 struct sk_buff *skb;
4232 unsigned int qlen;
4233 4241
4234 while ((skb = __skb_dequeue(&sd->process_queue))) { 4242 while ((skb = __skb_dequeue(&sd->process_queue))) {
4235 local_irq_enable(); 4243 local_irq_enable();
@@ -4243,24 +4251,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
4243 } 4251 }
4244 4252
4245 rps_lock(sd); 4253 rps_lock(sd);
4246 qlen = skb_queue_len(&sd->input_pkt_queue); 4254 if (skb_queue_empty(&sd->input_pkt_queue)) {
4247 if (qlen)
4248 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4249 &sd->process_queue);
4250
4251 if (qlen < quota - work) {
4252 /* 4255 /*
4253 * Inline a custom version of __napi_complete(). 4256 * Inline a custom version of __napi_complete().
4254 * only current cpu owns and manipulates this napi, 4257 * only current cpu owns and manipulates this napi,
4255 * and NAPI_STATE_SCHED is the only possible flag set on backlog. 4258 * and NAPI_STATE_SCHED is the only possible flag set
4256 * we can use a plain write instead of clear_bit(), 4259 * on backlog.
4260 * We can use a plain write instead of clear_bit(),
4257 * and we dont need an smp_mb() memory barrier. 4261 * and we dont need an smp_mb() memory barrier.
4258 */ 4262 */
4259 list_del(&napi->poll_list); 4263 list_del(&napi->poll_list);
4260 napi->state = 0; 4264 napi->state = 0;
4265 rps_unlock(sd);
4261 4266
4262 quota = work + qlen; 4267 break;
4263 } 4268 }
4269
4270 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4271 &sd->process_queue);
4264 rps_unlock(sd); 4272 rps_unlock(sd);
4265 } 4273 }
4266 local_irq_enable(); 4274 local_irq_enable();
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 827dd6beb49c..e1ec45ab1e63 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -39,7 +39,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
39{ 39{
40 int size, ct, err; 40 int size, ct, err;
41 41
42 if (m->msg_namelen) { 42 if (m->msg_name && m->msg_namelen) {
43 if (mode == VERIFY_READ) { 43 if (mode == VERIFY_READ) {
44 void __user *namep; 44 void __user *namep;
45 namep = (void __user __force *) m->msg_name; 45 namep = (void __user __force *) m->msg_name;
@@ -48,10 +48,10 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
48 if (err < 0) 48 if (err < 0)
49 return err; 49 return err;
50 } 50 }
51 if (m->msg_name) 51 m->msg_name = address;
52 m->msg_name = address;
53 } else { 52 } else {
54 m->msg_name = NULL; 53 m->msg_name = NULL;
54 m->msg_namelen = 0;
55 } 55 }
56 56
57 size = m->msg_iovlen * sizeof(struct iovec); 57 size = m->msg_iovlen * sizeof(struct iovec);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 32d872eec7f5..ef31fef25e5a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2249,7 +2249,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2249 ndm->ndm_pad1 = 0; 2249 ndm->ndm_pad1 = 0;
2250 ndm->ndm_pad2 = 0; 2250 ndm->ndm_pad2 = 0;
2251 ndm->ndm_flags = pn->flags | NTF_PROXY; 2251 ndm->ndm_flags = pn->flags | NTF_PROXY;
2252 ndm->ndm_type = NDA_DST; 2252 ndm->ndm_type = RTN_UNICAST;
2253 ndm->ndm_ifindex = pn->dev->ifindex; 2253 ndm->ndm_ifindex = pn->dev->ifindex;
2254 ndm->ndm_state = NUD_NONE; 2254 ndm->ndm_state = NUD_NONE;
2255 2255
@@ -3059,11 +3059,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3059 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, 3059 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3060 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); 3060 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3061 } else { 3061 } else {
3062 struct neigh_table *tbl = p->tbl;
3062 dev_name_source = "default"; 3063 dev_name_source = "default";
3063 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1); 3064 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3064 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1; 3065 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3065 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2; 3066 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3066 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3; 3067 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3067 } 3068 }
3068 3069
3069 if (handler) { 3070 if (handler) {
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 9a32f55cf9b9..39d2c39bdf87 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -151,7 +151,7 @@ int dns_query(const char *type, const char *name, size_t namelen,
151 goto put; 151 goto put;
152 152
153 memcpy(*_result, upayload->data, len); 153 memcpy(*_result, upayload->data, len);
154 *_result[len] = '\0'; 154 (*_result)[len] = '\0';
155 155
156 if (_expiry) 156 if (_expiry)
157 *_expiry = rkey->expiry; 157 *_expiry = rkey->expiry;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index d5e6836cf772..d156b3c5f363 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1429,6 +1429,9 @@ static int inet_gro_complete(struct sk_buff *skb, int nhoff)
1429 int proto = iph->protocol; 1429 int proto = iph->protocol;
1430 int err = -ENOSYS; 1430 int err = -ENOSYS;
1431 1431
1432 if (skb->encapsulation)
1433 skb_set_inner_network_header(skb, nhoff);
1434
1432 csum_replace2(&iph->check, iph->tot_len, newlen); 1435 csum_replace2(&iph->check, iph->tot_len, newlen);
1433 iph->tot_len = newlen; 1436 iph->tot_len = newlen;
1434 1437
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 4e9619bca732..0485bf7f8f03 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -68,6 +68,7 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
68 68
69 skb_push(skb, hdr_len); 69 skb_push(skb, hdr_len);
70 70
71 skb_reset_transport_header(skb);
71 greh = (struct gre_base_hdr *)skb->data; 72 greh = (struct gre_base_hdr *)skb->data;
72 greh->flags = tnl_flags_to_gre_flags(tpi->flags); 73 greh->flags = tnl_flags_to_gre_flags(tpi->flags);
73 greh->protocol = tpi->proto; 74 greh->protocol = tpi->proto;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index eb92deb12666..f0bdd47bbbcb 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -263,6 +263,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
263 int err = -ENOENT; 263 int err = -ENOENT;
264 __be16 type; 264 __be16 type;
265 265
266 skb->encapsulation = 1;
267 skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
268
266 type = greh->protocol; 269 type = greh->protocol;
267 if (greh->flags & GRE_KEY) 270 if (greh->flags & GRE_KEY)
268 grehlen += GRE_HEADER_SECTION; 271 grehlen += GRE_HEADER_SECTION;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 79c3d947a481..42b7bcf8045b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -739,8 +739,6 @@ static void icmp_unreach(struct sk_buff *skb)
739 /* fall through */ 739 /* fall through */
740 case 0: 740 case 0:
741 info = ntohs(icmph->un.frag.mtu); 741 info = ntohs(icmph->un.frag.mtu);
742 if (!info)
743 goto out;
744 } 742 }
745 break; 743 break;
746 case ICMP_SR_FAILED: 744 case ICMP_SR_FAILED:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6748d420f714..db710b059bab 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1944,6 +1944,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1944 1944
1945 rtnl_lock(); 1945 rtnl_lock();
1946 in_dev = ip_mc_find_dev(net, imr); 1946 in_dev = ip_mc_find_dev(net, imr);
1947 if (!in_dev) {
1948 ret = -ENODEV;
1949 goto out;
1950 }
1947 ifindex = imr->imr_ifindex; 1951 ifindex = imr->imr_ifindex;
1948 for (imlp = &inet->mc_list; 1952 for (imlp = &inet->mc_list;
1949 (iml = rtnl_dereference(*imlp)) != NULL; 1953 (iml = rtnl_dereference(*imlp)) != NULL;
@@ -1961,16 +1965,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1961 1965
1962 *imlp = iml->next_rcu; 1966 *imlp = iml->next_rcu;
1963 1967
1964 if (in_dev) 1968 ip_mc_dec_group(in_dev, group);
1965 ip_mc_dec_group(in_dev, group);
1966 rtnl_unlock(); 1969 rtnl_unlock();
1967 /* decrease mem now to avoid the memleak warning */ 1970 /* decrease mem now to avoid the memleak warning */
1968 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); 1971 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
1969 kfree_rcu(iml, rcu); 1972 kfree_rcu(iml, rcu);
1970 return 0; 1973 return 0;
1971 } 1974 }
1972 if (!in_dev) 1975out:
1973 ret = -ENODEV;
1974 rtnl_unlock(); 1976 rtnl_unlock();
1975 return ret; 1977 return ret;
1976} 1978}
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 5e7aecea05cd..ad382499bace 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
288 optptr++; 288 optptr++;
289 continue; 289 continue;
290 } 290 }
291 if (unlikely(l < 2)) {
292 pp_ptr = optptr;
293 goto error;
294 }
291 optlen = optptr[1]; 295 optlen = optptr[1];
292 if (optlen < 2 || optlen > l) { 296 if (optlen < 2 || optlen > l) {
293 pp_ptr = optptr; 297 pp_ptr = optptr;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 54b6731dab55..6f9de61dce5f 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -169,6 +169,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
169 169
170 hlist_for_each_entry_rcu(t, head, hash_node) { 170 hlist_for_each_entry_rcu(t, head, hash_node) {
171 if (remote != t->parms.iph.daddr || 171 if (remote != t->parms.iph.daddr ||
172 t->parms.iph.saddr != 0 ||
172 !(t->dev->flags & IFF_UP)) 173 !(t->dev->flags & IFF_UP))
173 continue; 174 continue;
174 175
@@ -185,10 +186,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
185 head = &itn->tunnels[hash]; 186 head = &itn->tunnels[hash];
186 187
187 hlist_for_each_entry_rcu(t, head, hash_node) { 188 hlist_for_each_entry_rcu(t, head, hash_node) {
188 if ((local != t->parms.iph.saddr && 189 if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
189 (local != t->parms.iph.daddr || 190 (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
190 !ipv4_is_multicast(local))) || 191 continue;
191 !(t->dev->flags & IFF_UP)) 192
193 if (!(t->dev->flags & IFF_UP))
192 continue; 194 continue;
193 195
194 if (!ip_tunnel_key_match(&t->parms, flags, key)) 196 if (!ip_tunnel_key_match(&t->parms, flags, key))
@@ -205,6 +207,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
205 207
206 hlist_for_each_entry_rcu(t, head, hash_node) { 208 hlist_for_each_entry_rcu(t, head, hash_node) {
207 if (t->parms.i_key != key || 209 if (t->parms.i_key != key ||
210 t->parms.iph.saddr != 0 ||
211 t->parms.iph.daddr != 0 ||
208 !(t->dev->flags & IFF_UP)) 212 !(t->dev->flags & IFF_UP))
209 continue; 213 continue;
210 214
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 082239ffe34a..190199851c9a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -457,8 +457,31 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
457 return neigh_create(&arp_tbl, pkey, dev); 457 return neigh_create(&arp_tbl, pkey, dev);
458} 458}
459 459
460atomic_t *ip_idents __read_mostly; 460#define IP_IDENTS_SZ 2048u
461EXPORT_SYMBOL(ip_idents); 461struct ip_ident_bucket {
462 atomic_t id;
463 u32 stamp32;
464};
465
466static struct ip_ident_bucket *ip_idents __read_mostly;
467
468/* In order to protect privacy, we add a perturbation to identifiers
 469 * if one generator is seldom used. This makes it hard for an attacker
470 * to infer how many packets were sent between two points in time.
471 */
472u32 ip_idents_reserve(u32 hash, int segs)
473{
474 struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
475 u32 old = ACCESS_ONCE(bucket->stamp32);
476 u32 now = (u32)jiffies;
477 u32 delta = 0;
478
479 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
480 delta = prandom_u32_max(now - old);
481
482 return atomic_add_return(segs + delta, &bucket->id) - segs;
483}
484EXPORT_SYMBOL(ip_idents_reserve);
462 485
463void __ip_select_ident(struct iphdr *iph, int segs) 486void __ip_select_ident(struct iphdr *iph, int segs)
464{ 487{
@@ -467,7 +490,10 @@ void __ip_select_ident(struct iphdr *iph, int segs)
467 490
468 net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd)); 491 net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
469 492
470 hash = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd); 493 hash = jhash_3words((__force u32)iph->daddr,
494 (__force u32)iph->saddr,
495 iph->protocol,
496 ip_idents_hashrnd);
471 id = ip_idents_reserve(hash, segs); 497 id = ip_idents_reserve(hash, segs);
472 iph->id = htons(id); 498 iph->id = htons(id);
473} 499}
@@ -1010,7 +1036,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1010 const struct iphdr *iph = (const struct iphdr *) skb->data; 1036 const struct iphdr *iph = (const struct iphdr *) skb->data;
1011 struct flowi4 fl4; 1037 struct flowi4 fl4;
1012 struct rtable *rt; 1038 struct rtable *rt;
1013 struct dst_entry *dst; 1039 struct dst_entry *odst = NULL;
1014 bool new = false; 1040 bool new = false;
1015 1041
1016 bh_lock_sock(sk); 1042 bh_lock_sock(sk);
@@ -1018,16 +1044,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1018 if (!ip_sk_accept_pmtu(sk)) 1044 if (!ip_sk_accept_pmtu(sk))
1019 goto out; 1045 goto out;
1020 1046
1021 rt = (struct rtable *) __sk_dst_get(sk); 1047 odst = sk_dst_get(sk);
1022 1048
1023 if (sock_owned_by_user(sk) || !rt) { 1049 if (sock_owned_by_user(sk) || !odst) {
1024 __ipv4_sk_update_pmtu(skb, sk, mtu); 1050 __ipv4_sk_update_pmtu(skb, sk, mtu);
1025 goto out; 1051 goto out;
1026 } 1052 }
1027 1053
1028 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); 1054 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1029 1055
1030 if (!__sk_dst_check(sk, 0)) { 1056 rt = (struct rtable *)odst;
1057 if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
1031 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); 1058 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1032 if (IS_ERR(rt)) 1059 if (IS_ERR(rt))
1033 goto out; 1060 goto out;
@@ -1037,8 +1064,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1037 1064
1038 __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu); 1065 __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
1039 1066
1040 dst = dst_check(&rt->dst, 0); 1067 if (!dst_check(&rt->dst, 0)) {
1041 if (!dst) {
1042 if (new) 1068 if (new)
1043 dst_release(&rt->dst); 1069 dst_release(&rt->dst);
1044 1070
@@ -1050,10 +1076,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1050 } 1076 }
1051 1077
1052 if (new) 1078 if (new)
1053 __sk_dst_set(sk, &rt->dst); 1079 sk_dst_set(sk, &rt->dst);
1054 1080
1055out: 1081out:
1056 bh_unlock_sock(sk); 1082 bh_unlock_sock(sk);
1083 dst_release(odst);
1057} 1084}
1058EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu); 1085EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1059 1086
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index eb1dde37e678..9d2118e5fbc7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1108 if (unlikely(tp->repair)) { 1108 if (unlikely(tp->repair)) {
1109 if (tp->repair_queue == TCP_RECV_QUEUE) { 1109 if (tp->repair_queue == TCP_RECV_QUEUE) {
1110 copied = tcp_send_rcvq(sk, msg, size); 1110 copied = tcp_send_rcvq(sk, msg, size);
1111 goto out; 1111 goto out_nopush;
1112 } 1112 }
1113 1113
1114 err = -EINVAL; 1114 err = -EINVAL;
@@ -1282,6 +1282,7 @@ wait_for_memory:
1282out: 1282out:
1283 if (copied) 1283 if (copied)
1284 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); 1284 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1285out_nopush:
1285 release_sock(sk); 1286 release_sock(sk);
1286 return copied + copied_syn; 1287 return copied + copied_syn;
1287 1288
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b5c23756965a..40639c288dc2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1106,7 +1106,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
1106 } 1106 }
1107 1107
1108 /* D-SACK for already forgotten data... Do dumb counting. */ 1108 /* D-SACK for already forgotten data... Do dumb counting. */
1109 if (dup_sack && tp->undo_marker && tp->undo_retrans && 1109 if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
1110 !after(end_seq_0, prior_snd_una) && 1110 !after(end_seq_0, prior_snd_una) &&
1111 after(end_seq_0, tp->undo_marker)) 1111 after(end_seq_0, tp->undo_marker))
1112 tp->undo_retrans--; 1112 tp->undo_retrans--;
@@ -1187,7 +1187,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
1187 1187
1188 /* Account D-SACK for retransmitted packet. */ 1188 /* Account D-SACK for retransmitted packet. */
1189 if (dup_sack && (sacked & TCPCB_RETRANS)) { 1189 if (dup_sack && (sacked & TCPCB_RETRANS)) {
1190 if (tp->undo_marker && tp->undo_retrans && 1190 if (tp->undo_marker && tp->undo_retrans > 0 &&
1191 after(end_seq, tp->undo_marker)) 1191 after(end_seq, tp->undo_marker))
1192 tp->undo_retrans--; 1192 tp->undo_retrans--;
1193 if (sacked & TCPCB_SACKED_ACKED) 1193 if (sacked & TCPCB_SACKED_ACKED)
@@ -1893,7 +1893,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
1893 tp->lost_out = 0; 1893 tp->lost_out = 0;
1894 1894
1895 tp->undo_marker = 0; 1895 tp->undo_marker = 0;
1896 tp->undo_retrans = 0; 1896 tp->undo_retrans = -1;
1897} 1897}
1898 1898
1899void tcp_clear_retrans(struct tcp_sock *tp) 1899void tcp_clear_retrans(struct tcp_sock *tp)
@@ -2665,7 +2665,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
2665 2665
2666 tp->prior_ssthresh = 0; 2666 tp->prior_ssthresh = 0;
2667 tp->undo_marker = tp->snd_una; 2667 tp->undo_marker = tp->snd_una;
2668 tp->undo_retrans = tp->retrans_out; 2668 tp->undo_retrans = tp->retrans_out ? : -1;
2669 2669
2670 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { 2670 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
2671 if (!ece_ack) 2671 if (!ece_ack)
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 4e86c59ec7f7..55046ecd083e 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -309,7 +309,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
309 309
310 th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr, 310 th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
311 iph->daddr, 0); 311 iph->daddr, 0);
312 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 312 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
313 313
314 return tcp_gro_complete(skb); 314 return tcp_gro_complete(skb);
315} 315}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d92bce0ea24e..179b51e6bda3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2525,8 +2525,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2525 if (!tp->retrans_stamp) 2525 if (!tp->retrans_stamp)
2526 tp->retrans_stamp = TCP_SKB_CB(skb)->when; 2526 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2527 2527
2528 tp->undo_retrans += tcp_skb_pcount(skb);
2529
2530 /* snd_nxt is stored to detect loss of retransmitted segment, 2528 /* snd_nxt is stored to detect loss of retransmitted segment,
2531 * see tcp_input.c tcp_sacktag_write_queue(). 2529 * see tcp_input.c tcp_sacktag_write_queue().
2532 */ 2530 */
@@ -2534,6 +2532,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2534 } else if (err != -EBUSY) { 2532 } else if (err != -EBUSY) {
2535 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); 2533 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2536 } 2534 }
2535
2536 if (tp->undo_retrans < 0)
2537 tp->undo_retrans = 0;
2538 tp->undo_retrans += tcp_skb_pcount(skb);
2537 return err; 2539 return err;
2538} 2540}
2539 2541
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d92f94b7e402..7d5a8661df76 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1588,8 +1588,11 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1588 goto csum_error; 1588 goto csum_error;
1589 1589
1590 1590
1591 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) 1591 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
1592 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
1593 is_udplite);
1592 goto drop; 1594 goto drop;
1595 }
1593 1596
1594 rc = 0; 1597 rc = 0;
1595 1598
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cb9df0eb4023..45702b8cd141 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -545,6 +545,8 @@ static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
545 net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd)); 545 net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
546 546
547 hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd); 547 hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
548 hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
549
548 id = ip_idents_reserve(hash, 1); 550 id = ip_idents_reserve(hash, 1);
549 fhdr->identification = htonl(id); 551 fhdr->identification = htonl(id);
550} 552}
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 08b367c6b9cf..617f0958e164 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1301,8 +1301,17 @@ int igmp6_event_query(struct sk_buff *skb)
1301 len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr); 1301 len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1302 len -= skb_network_header_len(skb); 1302 len -= skb_network_header_len(skb);
1303 1303
1304 /* Drop queries with not link local source */ 1304 /* RFC3810 6.2
1305 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) 1305 * Upon reception of an MLD message that contains a Query, the node
1306 * checks if the source address of the message is a valid link-local
1307 * address, if the Hop Limit is set to 1, and if the Router Alert
1308 * option is present in the Hop-By-Hop Options header of the IPv6
1309 * packet. If any of these checks fails, the packet is dropped.
1310 */
1311 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
1312 ipv6_hdr(skb)->hop_limit != 1 ||
1313 !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
1314 IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
1306 return -EINVAL; 1315 return -EINVAL;
1307 1316
1308 idev = __in6_dev_get(skb->dev); 1317 idev = __in6_dev_get(skb->dev);
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 8517d3cd1aed..01b0ff9a0c2c 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -73,7 +73,7 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
73 73
74 th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr, 74 th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
75 &iph->daddr, 0); 75 &iph->daddr, 0);
76 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 76 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
77 77
78 return tcp_gro_complete(skb); 78 return tcp_gro_complete(skb);
79} 79}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 95c834799288..7092ff78fd84 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -674,8 +674,11 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
674 goto csum_error; 674 goto csum_error;
675 } 675 }
676 676
677 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) 677 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
678 UDP6_INC_STATS_BH(sock_net(sk),
679 UDP_MIB_RCVBUFERRORS, is_udplite);
678 goto drop; 680 goto drop;
681 }
679 682
680 skb_dst_drop(skb); 683 skb_dst_drop(skb);
681 684
@@ -690,6 +693,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
690 bh_unlock_sock(sk); 693 bh_unlock_sock(sk);
691 694
692 return rc; 695 return rc;
696
693csum_error: 697csum_error:
694 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 698 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
695drop: 699drop:
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 950909f04ee6..13752d96275e 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1365,7 +1365,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
1365 int err; 1365 int err;
1366 1366
1367 if (level != SOL_PPPOL2TP) 1367 if (level != SOL_PPPOL2TP)
1368 return udp_prot.setsockopt(sk, level, optname, optval, optlen); 1368 return -EINVAL;
1369 1369
1370 if (optlen < sizeof(int)) 1370 if (optlen < sizeof(int))
1371 return -EINVAL; 1371 return -EINVAL;
@@ -1491,7 +1491,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
1491 struct pppol2tp_session *ps; 1491 struct pppol2tp_session *ps;
1492 1492
1493 if (level != SOL_PPPOL2TP) 1493 if (level != SOL_PPPOL2TP)
1494 return udp_prot.getsockopt(sk, level, optname, optval, optlen); 1494 return -EINVAL;
1495 1495
1496 if (get_user(len, optlen)) 1496 if (get_user(len, optlen))
1497 return -EFAULT; 1497 return -EFAULT;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d7513a503be1..592f4b152ba8 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -472,12 +472,15 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
472{ 472{
473 struct ieee80211_sub_if_data *sdata = sta->sdata; 473 struct ieee80211_sub_if_data *sdata = sta->sdata;
474 struct ieee80211_local *local = sdata->local; 474 struct ieee80211_local *local = sdata->local;
475 struct rate_control_ref *ref = local->rate_ctrl; 475 struct rate_control_ref *ref = NULL;
476 struct timespec uptime; 476 struct timespec uptime;
477 u64 packets = 0; 477 u64 packets = 0;
478 u32 thr = 0; 478 u32 thr = 0;
479 int i, ac; 479 int i, ac;
480 480
481 if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
482 ref = local->rate_ctrl;
483
481 sinfo->generation = sdata->local->sta_generation; 484 sinfo->generation = sdata->local->sta_generation;
482 485
483 sinfo->filled = STATION_INFO_INACTIVE_TIME | 486 sinfo->filled = STATION_INFO_INACTIVE_TIME |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5214686d9fd1..1a252c606ad0 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -414,6 +414,9 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
414 if (ieee80211_has_order(hdr->frame_control)) 414 if (ieee80211_has_order(hdr->frame_control))
415 return TX_CONTINUE; 415 return TX_CONTINUE;
416 416
417 if (ieee80211_is_probe_req(hdr->frame_control))
418 return TX_CONTINUE;
419
417 if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) 420 if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
418 info->hw_queue = tx->sdata->vif.cab_queue; 421 info->hw_queue = tx->sdata->vif.cab_queue;
419 422
@@ -463,6 +466,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
463{ 466{
464 struct sta_info *sta = tx->sta; 467 struct sta_info *sta = tx->sta;
465 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 468 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
469 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
466 struct ieee80211_local *local = tx->local; 470 struct ieee80211_local *local = tx->local;
467 471
468 if (unlikely(!sta)) 472 if (unlikely(!sta))
@@ -473,6 +477,12 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
473 !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) { 477 !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
474 int ac = skb_get_queue_mapping(tx->skb); 478 int ac = skb_get_queue_mapping(tx->skb);
475 479
480 if (ieee80211_is_mgmt(hdr->frame_control) &&
481 !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
482 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
483 return TX_CONTINUE;
484 }
485
476 ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n", 486 ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
477 sta->sta.addr, sta->sta.aid, ac); 487 sta->sta.addr, sta->sta.aid, ac);
478 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 488 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -531,19 +541,9 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
531static ieee80211_tx_result debug_noinline 541static ieee80211_tx_result debug_noinline
532ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) 542ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
533{ 543{
534 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
535 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
536
537 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) 544 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
538 return TX_CONTINUE; 545 return TX_CONTINUE;
539 546
540 if (ieee80211_is_mgmt(hdr->frame_control) &&
541 !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
542 if (tx->flags & IEEE80211_TX_UNICAST)
543 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
544 return TX_CONTINUE;
545 }
546
547 if (tx->flags & IEEE80211_TX_UNICAST) 547 if (tx->flags & IEEE80211_TX_UNICAST)
548 return ieee80211_tx_h_unicast_ps_buf(tx); 548 return ieee80211_tx_h_unicast_ps_buf(tx);
549 else 549 else
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 6886601afe1c..a6cda52ed920 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1096,11 +1096,12 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1096 int err; 1096 int err;
1097 1097
1098 /* 24 + 6 = header + auth_algo + auth_transaction + status_code */ 1098 /* 24 + 6 = header + auth_algo + auth_transaction + status_code */
1099 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 6 + extra_len); 1099 skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
1100 24 + 6 + extra_len + IEEE80211_WEP_ICV_LEN);
1100 if (!skb) 1101 if (!skb)
1101 return; 1102 return;
1102 1103
1103 skb_reserve(skb, local->hw.extra_tx_headroom); 1104 skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);
1104 1105
1105 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); 1106 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
1106 memset(mgmt, 0, 24 + 6); 1107 memset(mgmt, 0, 24 + 6);
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index a8eb0a89326a..610e19c0e13f 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -797,7 +797,6 @@ static void ip_vs_conn_expire(unsigned long data)
797 ip_vs_control_del(cp); 797 ip_vs_control_del(cp);
798 798
799 if (cp->flags & IP_VS_CONN_F_NFCT) { 799 if (cp->flags & IP_VS_CONN_F_NFCT) {
800 ip_vs_conn_drop_conntrack(cp);
801 /* Do not access conntracks during subsys cleanup 800 /* Do not access conntracks during subsys cleanup
802 * because nf_conntrack_find_get can not be used after 801 * because nf_conntrack_find_get can not be used after
803 * conntrack cleanup for the net. 802 * conntrack cleanup for the net.
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ab4566cfcbe4..8746ff9a8357 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -35,7 +35,7 @@ int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
35{ 35{
36 INIT_LIST_HEAD(&afi->tables); 36 INIT_LIST_HEAD(&afi->tables);
37 nfnl_lock(NFNL_SUBSYS_NFTABLES); 37 nfnl_lock(NFNL_SUBSYS_NFTABLES);
38 list_add_tail(&afi->list, &net->nft.af_info); 38 list_add_tail_rcu(&afi->list, &net->nft.af_info);
39 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 39 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
40 return 0; 40 return 0;
41} 41}
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(nft_register_afinfo);
51void nft_unregister_afinfo(struct nft_af_info *afi) 51void nft_unregister_afinfo(struct nft_af_info *afi)
52{ 52{
53 nfnl_lock(NFNL_SUBSYS_NFTABLES); 53 nfnl_lock(NFNL_SUBSYS_NFTABLES);
54 list_del(&afi->list); 54 list_del_rcu(&afi->list);
55 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 55 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
56} 56}
57EXPORT_SYMBOL_GPL(nft_unregister_afinfo); 57EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
@@ -277,11 +277,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
277 struct net *net = sock_net(skb->sk); 277 struct net *net = sock_net(skb->sk);
278 int family = nfmsg->nfgen_family; 278 int family = nfmsg->nfgen_family;
279 279
280 list_for_each_entry(afi, &net->nft.af_info, list) { 280 rcu_read_lock();
281 cb->seq = net->nft.base_seq;
282
283 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
281 if (family != NFPROTO_UNSPEC && family != afi->family) 284 if (family != NFPROTO_UNSPEC && family != afi->family)
282 continue; 285 continue;
283 286
284 list_for_each_entry(table, &afi->tables, list) { 287 list_for_each_entry_rcu(table, &afi->tables, list) {
285 if (idx < s_idx) 288 if (idx < s_idx)
286 goto cont; 289 goto cont;
287 if (idx > s_idx) 290 if (idx > s_idx)
@@ -294,11 +297,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
294 NLM_F_MULTI, 297 NLM_F_MULTI,
295 afi->family, table) < 0) 298 afi->family, table) < 0)
296 goto done; 299 goto done;
300
301 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
297cont: 302cont:
298 idx++; 303 idx++;
299 } 304 }
300 } 305 }
301done: 306done:
307 rcu_read_unlock();
302 cb->args[0] = idx; 308 cb->args[0] = idx;
303 return skb->len; 309 return skb->len;
304} 310}
@@ -407,6 +413,9 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
407 if (flags & ~NFT_TABLE_F_DORMANT) 413 if (flags & ~NFT_TABLE_F_DORMANT)
408 return -EINVAL; 414 return -EINVAL;
409 415
416 if (flags == ctx->table->flags)
417 return 0;
418
410 trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE, 419 trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
411 sizeof(struct nft_trans_table)); 420 sizeof(struct nft_trans_table));
412 if (trans == NULL) 421 if (trans == NULL)
@@ -514,7 +523,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
514 module_put(afi->owner); 523 module_put(afi->owner);
515 return err; 524 return err;
516 } 525 }
517 list_add_tail(&table->list, &afi->tables); 526 list_add_tail_rcu(&table->list, &afi->tables);
518 return 0; 527 return 0;
519} 528}
520 529
@@ -546,7 +555,7 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
546 if (err < 0) 555 if (err < 0)
547 return err; 556 return err;
548 557
549 list_del(&table->list); 558 list_del_rcu(&table->list);
550 return 0; 559 return 0;
551} 560}
552 561
@@ -635,13 +644,20 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
635{ 644{
636 struct nft_stats *cpu_stats, total; 645 struct nft_stats *cpu_stats, total;
637 struct nlattr *nest; 646 struct nlattr *nest;
647 unsigned int seq;
648 u64 pkts, bytes;
638 int cpu; 649 int cpu;
639 650
640 memset(&total, 0, sizeof(total)); 651 memset(&total, 0, sizeof(total));
641 for_each_possible_cpu(cpu) { 652 for_each_possible_cpu(cpu) {
642 cpu_stats = per_cpu_ptr(stats, cpu); 653 cpu_stats = per_cpu_ptr(stats, cpu);
643 total.pkts += cpu_stats->pkts; 654 do {
644 total.bytes += cpu_stats->bytes; 655 seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
656 pkts = cpu_stats->pkts;
657 bytes = cpu_stats->bytes;
658 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
659 total.pkts += pkts;
660 total.bytes += bytes;
645 } 661 }
646 nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS); 662 nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
647 if (nest == NULL) 663 if (nest == NULL)
@@ -761,12 +777,15 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
761 struct net *net = sock_net(skb->sk); 777 struct net *net = sock_net(skb->sk);
762 int family = nfmsg->nfgen_family; 778 int family = nfmsg->nfgen_family;
763 779
764 list_for_each_entry(afi, &net->nft.af_info, list) { 780 rcu_read_lock();
781 cb->seq = net->nft.base_seq;
782
783 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
765 if (family != NFPROTO_UNSPEC && family != afi->family) 784 if (family != NFPROTO_UNSPEC && family != afi->family)
766 continue; 785 continue;
767 786
768 list_for_each_entry(table, &afi->tables, list) { 787 list_for_each_entry_rcu(table, &afi->tables, list) {
769 list_for_each_entry(chain, &table->chains, list) { 788 list_for_each_entry_rcu(chain, &table->chains, list) {
770 if (idx < s_idx) 789 if (idx < s_idx)
771 goto cont; 790 goto cont;
772 if (idx > s_idx) 791 if (idx > s_idx)
@@ -778,17 +797,19 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
778 NLM_F_MULTI, 797 NLM_F_MULTI,
779 afi->family, table, chain) < 0) 798 afi->family, table, chain) < 0)
780 goto done; 799 goto done;
800
801 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
781cont: 802cont:
782 idx++; 803 idx++;
783 } 804 }
784 } 805 }
785 } 806 }
786done: 807done:
808 rcu_read_unlock();
787 cb->args[0] = idx; 809 cb->args[0] = idx;
788 return skb->len; 810 return skb->len;
789} 811}
790 812
791
792static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb, 813static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
793 const struct nlmsghdr *nlh, 814 const struct nlmsghdr *nlh,
794 const struct nlattr * const nla[]) 815 const struct nlattr * const nla[])
@@ -861,7 +882,7 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
861 if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS]) 882 if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
862 return ERR_PTR(-EINVAL); 883 return ERR_PTR(-EINVAL);
863 884
864 newstats = alloc_percpu(struct nft_stats); 885 newstats = netdev_alloc_pcpu_stats(struct nft_stats);
865 if (newstats == NULL) 886 if (newstats == NULL)
866 return ERR_PTR(-ENOMEM); 887 return ERR_PTR(-ENOMEM);
867 888
@@ -1077,7 +1098,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1077 } 1098 }
1078 basechain->stats = stats; 1099 basechain->stats = stats;
1079 } else { 1100 } else {
1080 stats = alloc_percpu(struct nft_stats); 1101 stats = netdev_alloc_pcpu_stats(struct nft_stats);
1081 if (IS_ERR(stats)) { 1102 if (IS_ERR(stats)) {
1082 module_put(type->owner); 1103 module_put(type->owner);
1083 kfree(basechain); 1104 kfree(basechain);
@@ -1130,7 +1151,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1130 goto err2; 1151 goto err2;
1131 1152
1132 table->use++; 1153 table->use++;
1133 list_add_tail(&chain->list, &table->chains); 1154 list_add_tail_rcu(&chain->list, &table->chains);
1134 return 0; 1155 return 0;
1135err2: 1156err2:
1136 if (!(table->flags & NFT_TABLE_F_DORMANT) && 1157 if (!(table->flags & NFT_TABLE_F_DORMANT) &&
@@ -1180,7 +1201,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
1180 return err; 1201 return err;
1181 1202
1182 table->use--; 1203 table->use--;
1183 list_del(&chain->list); 1204 list_del_rcu(&chain->list);
1184 return 0; 1205 return 0;
1185} 1206}
1186 1207
@@ -1199,9 +1220,9 @@ int nft_register_expr(struct nft_expr_type *type)
1199{ 1220{
1200 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1221 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1201 if (type->family == NFPROTO_UNSPEC) 1222 if (type->family == NFPROTO_UNSPEC)
1202 list_add_tail(&type->list, &nf_tables_expressions); 1223 list_add_tail_rcu(&type->list, &nf_tables_expressions);
1203 else 1224 else
1204 list_add(&type->list, &nf_tables_expressions); 1225 list_add_rcu(&type->list, &nf_tables_expressions);
1205 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1226 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1206 return 0; 1227 return 0;
1207} 1228}
@@ -1216,7 +1237,7 @@ EXPORT_SYMBOL_GPL(nft_register_expr);
1216void nft_unregister_expr(struct nft_expr_type *type) 1237void nft_unregister_expr(struct nft_expr_type *type)
1217{ 1238{
1218 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1239 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1219 list_del(&type->list); 1240 list_del_rcu(&type->list);
1220 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1241 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1221} 1242}
1222EXPORT_SYMBOL_GPL(nft_unregister_expr); 1243EXPORT_SYMBOL_GPL(nft_unregister_expr);
@@ -1549,16 +1570,17 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
1549 unsigned int idx = 0, s_idx = cb->args[0]; 1570 unsigned int idx = 0, s_idx = cb->args[0];
1550 struct net *net = sock_net(skb->sk); 1571 struct net *net = sock_net(skb->sk);
1551 int family = nfmsg->nfgen_family; 1572 int family = nfmsg->nfgen_family;
1552 u8 genctr = ACCESS_ONCE(net->nft.genctr);
1553 u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
1554 1573
1555 list_for_each_entry(afi, &net->nft.af_info, list) { 1574 rcu_read_lock();
1575 cb->seq = net->nft.base_seq;
1576
1577 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
1556 if (family != NFPROTO_UNSPEC && family != afi->family) 1578 if (family != NFPROTO_UNSPEC && family != afi->family)
1557 continue; 1579 continue;
1558 1580
1559 list_for_each_entry(table, &afi->tables, list) { 1581 list_for_each_entry_rcu(table, &afi->tables, list) {
1560 list_for_each_entry(chain, &table->chains, list) { 1582 list_for_each_entry_rcu(chain, &table->chains, list) {
1561 list_for_each_entry(rule, &chain->rules, list) { 1583 list_for_each_entry_rcu(rule, &chain->rules, list) {
1562 if (!nft_rule_is_active(net, rule)) 1584 if (!nft_rule_is_active(net, rule))
1563 goto cont; 1585 goto cont;
1564 if (idx < s_idx) 1586 if (idx < s_idx)
@@ -1572,6 +1594,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
1572 NLM_F_MULTI | NLM_F_APPEND, 1594 NLM_F_MULTI | NLM_F_APPEND,
1573 afi->family, table, chain, rule) < 0) 1595 afi->family, table, chain, rule) < 0)
1574 goto done; 1596 goto done;
1597
1598 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1575cont: 1599cont:
1576 idx++; 1600 idx++;
1577 } 1601 }
@@ -1579,9 +1603,7 @@ cont:
1579 } 1603 }
1580 } 1604 }
1581done: 1605done:
1582 /* Invalidate this dump, a transition to the new generation happened */ 1606 rcu_read_unlock();
1583 if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
1584 return -EBUSY;
1585 1607
1586 cb->args[0] = idx; 1608 cb->args[0] = idx;
1587 return skb->len; 1609 return skb->len;
@@ -1932,7 +1954,7 @@ static LIST_HEAD(nf_tables_set_ops);
1932int nft_register_set(struct nft_set_ops *ops) 1954int nft_register_set(struct nft_set_ops *ops)
1933{ 1955{
1934 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1956 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1935 list_add_tail(&ops->list, &nf_tables_set_ops); 1957 list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
1936 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1958 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1937 return 0; 1959 return 0;
1938} 1960}
@@ -1941,7 +1963,7 @@ EXPORT_SYMBOL_GPL(nft_register_set);
1941void nft_unregister_set(struct nft_set_ops *ops) 1963void nft_unregister_set(struct nft_set_ops *ops)
1942{ 1964{
1943 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1965 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1944 list_del(&ops->list); 1966 list_del_rcu(&ops->list);
1945 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1967 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1946} 1968}
1947EXPORT_SYMBOL_GPL(nft_unregister_set); 1969EXPORT_SYMBOL_GPL(nft_unregister_set);
@@ -2234,7 +2256,10 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
2234 if (cb->args[1]) 2256 if (cb->args[1])
2235 return skb->len; 2257 return skb->len;
2236 2258
2237 list_for_each_entry(set, &ctx->table->sets, list) { 2259 rcu_read_lock();
2260 cb->seq = ctx->net->nft.base_seq;
2261
2262 list_for_each_entry_rcu(set, &ctx->table->sets, list) {
2238 if (idx < s_idx) 2263 if (idx < s_idx)
2239 goto cont; 2264 goto cont;
2240 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET, 2265 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2242,11 +2267,13 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
2242 cb->args[0] = idx; 2267 cb->args[0] = idx;
2243 goto done; 2268 goto done;
2244 } 2269 }
2270 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2245cont: 2271cont:
2246 idx++; 2272 idx++;
2247 } 2273 }
2248 cb->args[1] = 1; 2274 cb->args[1] = 1;
2249done: 2275done:
2276 rcu_read_unlock();
2250 return skb->len; 2277 return skb->len;
2251} 2278}
2252 2279
@@ -2260,7 +2287,10 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
2260 if (cb->args[1]) 2287 if (cb->args[1])
2261 return skb->len; 2288 return skb->len;
2262 2289
2263 list_for_each_entry(table, &ctx->afi->tables, list) { 2290 rcu_read_lock();
2291 cb->seq = ctx->net->nft.base_seq;
2292
2293 list_for_each_entry_rcu(table, &ctx->afi->tables, list) {
2264 if (cur_table) { 2294 if (cur_table) {
2265 if (cur_table != table) 2295 if (cur_table != table)
2266 continue; 2296 continue;
@@ -2269,7 +2299,7 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
2269 } 2299 }
2270 ctx->table = table; 2300 ctx->table = table;
2271 idx = 0; 2301 idx = 0;
2272 list_for_each_entry(set, &ctx->table->sets, list) { 2302 list_for_each_entry_rcu(set, &ctx->table->sets, list) {
2273 if (idx < s_idx) 2303 if (idx < s_idx)
2274 goto cont; 2304 goto cont;
2275 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET, 2305 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2278,12 +2308,14 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
2278 cb->args[2] = (unsigned long) table; 2308 cb->args[2] = (unsigned long) table;
2279 goto done; 2309 goto done;
2280 } 2310 }
2311 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2281cont: 2312cont:
2282 idx++; 2313 idx++;
2283 } 2314 }
2284 } 2315 }
2285 cb->args[1] = 1; 2316 cb->args[1] = 1;
2286done: 2317done:
2318 rcu_read_unlock();
2287 return skb->len; 2319 return skb->len;
2288} 2320}
2289 2321
@@ -2300,7 +2332,10 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2300 if (cb->args[1]) 2332 if (cb->args[1])
2301 return skb->len; 2333 return skb->len;
2302 2334
2303 list_for_each_entry(afi, &net->nft.af_info, list) { 2335 rcu_read_lock();
2336 cb->seq = net->nft.base_seq;
2337
2338 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
2304 if (cur_family) { 2339 if (cur_family) {
2305 if (afi->family != cur_family) 2340 if (afi->family != cur_family)
2306 continue; 2341 continue;
@@ -2308,7 +2343,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2308 cur_family = 0; 2343 cur_family = 0;
2309 } 2344 }
2310 2345
2311 list_for_each_entry(table, &afi->tables, list) { 2346 list_for_each_entry_rcu(table, &afi->tables, list) {
2312 if (cur_table) { 2347 if (cur_table) {
2313 if (cur_table != table) 2348 if (cur_table != table)
2314 continue; 2349 continue;
@@ -2319,7 +2354,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2319 ctx->table = table; 2354 ctx->table = table;
2320 ctx->afi = afi; 2355 ctx->afi = afi;
2321 idx = 0; 2356 idx = 0;
2322 list_for_each_entry(set, &ctx->table->sets, list) { 2357 list_for_each_entry_rcu(set, &ctx->table->sets, list) {
2323 if (idx < s_idx) 2358 if (idx < s_idx)
2324 goto cont; 2359 goto cont;
2325 if (nf_tables_fill_set(skb, ctx, set, 2360 if (nf_tables_fill_set(skb, ctx, set,
@@ -2330,6 +2365,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2330 cb->args[3] = afi->family; 2365 cb->args[3] = afi->family;
2331 goto done; 2366 goto done;
2332 } 2367 }
2368 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2333cont: 2369cont:
2334 idx++; 2370 idx++;
2335 } 2371 }
@@ -2339,6 +2375,7 @@ cont:
2339 } 2375 }
2340 cb->args[1] = 1; 2376 cb->args[1] = 1;
2341done: 2377done:
2378 rcu_read_unlock();
2342 return skb->len; 2379 return skb->len;
2343} 2380}
2344 2381
@@ -2597,7 +2634,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2597 if (err < 0) 2634 if (err < 0)
2598 goto err2; 2635 goto err2;
2599 2636
2600 list_add_tail(&set->list, &table->sets); 2637 list_add_tail_rcu(&set->list, &table->sets);
2601 table->use++; 2638 table->use++;
2602 return 0; 2639 return 0;
2603 2640
@@ -2617,7 +2654,7 @@ static void nft_set_destroy(struct nft_set *set)
2617 2654
2618static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set) 2655static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
2619{ 2656{
2620 list_del(&set->list); 2657 list_del_rcu(&set->list);
2621 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC); 2658 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
2622 nft_set_destroy(set); 2659 nft_set_destroy(set);
2623} 2660}
@@ -2652,7 +2689,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
2652 if (err < 0) 2689 if (err < 0)
2653 return err; 2690 return err;
2654 2691
2655 list_del(&set->list); 2692 list_del_rcu(&set->list);
2656 ctx.table->use--; 2693 ctx.table->use--;
2657 return 0; 2694 return 0;
2658} 2695}
@@ -2704,14 +2741,14 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
2704 } 2741 }
2705bind: 2742bind:
2706 binding->chain = ctx->chain; 2743 binding->chain = ctx->chain;
2707 list_add_tail(&binding->list, &set->bindings); 2744 list_add_tail_rcu(&binding->list, &set->bindings);
2708 return 0; 2745 return 0;
2709} 2746}
2710 2747
2711void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, 2748void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
2712 struct nft_set_binding *binding) 2749 struct nft_set_binding *binding)
2713{ 2750{
2714 list_del(&binding->list); 2751 list_del_rcu(&binding->list);
2715 2752
2716 if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS && 2753 if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
2717 !(set->flags & NFT_SET_INACTIVE)) 2754 !(set->flags & NFT_SET_INACTIVE))
@@ -3346,7 +3383,7 @@ static int nf_tables_commit(struct sk_buff *skb)
3346 struct nft_set *set; 3383 struct nft_set *set;
3347 3384
3348 /* Bump generation counter, invalidate any dump in progress */ 3385 /* Bump generation counter, invalidate any dump in progress */
3349 net->nft.genctr++; 3386 while (++net->nft.base_seq == 0);
3350 3387
3351 /* A new generation has just started */ 3388 /* A new generation has just started */
3352 net->nft.gencursor = gencursor_next(net); 3389 net->nft.gencursor = gencursor_next(net);
@@ -3491,12 +3528,12 @@ static int nf_tables_abort(struct sk_buff *skb)
3491 } 3528 }
3492 nft_trans_destroy(trans); 3529 nft_trans_destroy(trans);
3493 } else { 3530 } else {
3494 list_del(&trans->ctx.table->list); 3531 list_del_rcu(&trans->ctx.table->list);
3495 } 3532 }
3496 break; 3533 break;
3497 case NFT_MSG_DELTABLE: 3534 case NFT_MSG_DELTABLE:
3498 list_add_tail(&trans->ctx.table->list, 3535 list_add_tail_rcu(&trans->ctx.table->list,
3499 &trans->ctx.afi->tables); 3536 &trans->ctx.afi->tables);
3500 nft_trans_destroy(trans); 3537 nft_trans_destroy(trans);
3501 break; 3538 break;
3502 case NFT_MSG_NEWCHAIN: 3539 case NFT_MSG_NEWCHAIN:
@@ -3507,7 +3544,7 @@ static int nf_tables_abort(struct sk_buff *skb)
3507 nft_trans_destroy(trans); 3544 nft_trans_destroy(trans);
3508 } else { 3545 } else {
3509 trans->ctx.table->use--; 3546 trans->ctx.table->use--;
3510 list_del(&trans->ctx.chain->list); 3547 list_del_rcu(&trans->ctx.chain->list);
3511 if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) && 3548 if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
3512 trans->ctx.chain->flags & NFT_BASE_CHAIN) { 3549 trans->ctx.chain->flags & NFT_BASE_CHAIN) {
3513 nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops, 3550 nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
@@ -3517,8 +3554,8 @@ static int nf_tables_abort(struct sk_buff *skb)
3517 break; 3554 break;
3518 case NFT_MSG_DELCHAIN: 3555 case NFT_MSG_DELCHAIN:
3519 trans->ctx.table->use++; 3556 trans->ctx.table->use++;
3520 list_add_tail(&trans->ctx.chain->list, 3557 list_add_tail_rcu(&trans->ctx.chain->list,
3521 &trans->ctx.table->chains); 3558 &trans->ctx.table->chains);
3522 nft_trans_destroy(trans); 3559 nft_trans_destroy(trans);
3523 break; 3560 break;
3524 case NFT_MSG_NEWRULE: 3561 case NFT_MSG_NEWRULE:
@@ -3532,12 +3569,12 @@ static int nf_tables_abort(struct sk_buff *skb)
3532 break; 3569 break;
3533 case NFT_MSG_NEWSET: 3570 case NFT_MSG_NEWSET:
3534 trans->ctx.table->use--; 3571 trans->ctx.table->use--;
3535 list_del(&nft_trans_set(trans)->list); 3572 list_del_rcu(&nft_trans_set(trans)->list);
3536 break; 3573 break;
3537 case NFT_MSG_DELSET: 3574 case NFT_MSG_DELSET:
3538 trans->ctx.table->use++; 3575 trans->ctx.table->use++;
3539 list_add_tail(&nft_trans_set(trans)->list, 3576 list_add_tail_rcu(&nft_trans_set(trans)->list,
3540 &trans->ctx.table->sets); 3577 &trans->ctx.table->sets);
3541 nft_trans_destroy(trans); 3578 nft_trans_destroy(trans);
3542 break; 3579 break;
3543 case NFT_MSG_NEWSETELEM: 3580 case NFT_MSG_NEWSETELEM:
@@ -3951,6 +3988,7 @@ static int nf_tables_init_net(struct net *net)
3951{ 3988{
3952 INIT_LIST_HEAD(&net->nft.af_info); 3989 INIT_LIST_HEAD(&net->nft.af_info);
3953 INIT_LIST_HEAD(&net->nft.commit_list); 3990 INIT_LIST_HEAD(&net->nft.commit_list);
3991 net->nft.base_seq = 1;
3954 return 0; 3992 return 0;
3955} 3993}
3956 3994
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 345acfb1720b..3b90eb2b2c55 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -109,7 +109,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
109 struct nft_data data[NFT_REG_MAX + 1]; 109 struct nft_data data[NFT_REG_MAX + 1];
110 unsigned int stackptr = 0; 110 unsigned int stackptr = 0;
111 struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE]; 111 struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
112 struct nft_stats __percpu *stats; 112 struct nft_stats *stats;
113 int rulenum; 113 int rulenum;
114 /* 114 /*
115 * Cache cursor to avoid problems in case that the cursor is updated 115 * Cache cursor to avoid problems in case that the cursor is updated
@@ -205,9 +205,11 @@ next_rule:
205 nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY); 205 nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
206 206
207 rcu_read_lock_bh(); 207 rcu_read_lock_bh();
208 stats = rcu_dereference(nft_base_chain(basechain)->stats); 208 stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
209 __this_cpu_inc(stats->pkts); 209 u64_stats_update_begin(&stats->syncp);
210 __this_cpu_add(stats->bytes, pkt->skb->len); 210 stats->pkts++;
211 stats->bytes += pkt->skb->len;
212 u64_stats_update_end(&stats->syncp);
211 rcu_read_unlock_bh(); 213 rcu_read_unlock_bh();
212 214
213 return nft_base_chain(basechain)->policy; 215 return nft_base_chain(basechain)->policy;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 15c731f03fa6..e6fac7e3db52 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -636,7 +636,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
636 while (nlk->cb_running && netlink_dump_space(nlk)) { 636 while (nlk->cb_running && netlink_dump_space(nlk)) {
637 err = netlink_dump(sk); 637 err = netlink_dump(sk);
638 if (err < 0) { 638 if (err < 0) {
639 sk->sk_err = err; 639 sk->sk_err = -err;
640 sk->sk_error_report(sk); 640 sk->sk_error_report(sk);
641 break; 641 break;
642 } 642 }
@@ -2483,7 +2483,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2483 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { 2483 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2484 ret = netlink_dump(sk); 2484 ret = netlink_dump(sk);
2485 if (ret) { 2485 if (ret) {
2486 sk->sk_err = ret; 2486 sk->sk_err = -ret;
2487 sk->sk_error_report(sk); 2487 sk->sk_error_report(sk);
2488 } 2488 }
2489 } 2489 }
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index c36856a457ca..e70d8b18e962 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -551,6 +551,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
551 551
552 case OVS_ACTION_ATTR_SAMPLE: 552 case OVS_ACTION_ATTR_SAMPLE:
553 err = sample(dp, skb, a); 553 err = sample(dp, skb, a);
554 if (unlikely(err)) /* skb already freed. */
555 return err;
554 break; 556 break;
555 } 557 }
556 558
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 0d407bca81e3..9db4bf6740d1 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2013 Nicira, Inc. 2 * Copyright (c) 2007-2014 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
@@ -276,7 +276,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
276 OVS_CB(skb)->flow = flow; 276 OVS_CB(skb)->flow = flow;
277 OVS_CB(skb)->pkt_key = &key; 277 OVS_CB(skb)->pkt_key = &key;
278 278
279 ovs_flow_stats_update(OVS_CB(skb)->flow, skb); 279 ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
280 ovs_execute_actions(dp, skb); 280 ovs_execute_actions(dp, skb);
281 stats_counter = &stats->n_hit; 281 stats_counter = &stats->n_hit;
282 282
@@ -889,8 +889,11 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
889 } 889 }
890 /* The unmasked key has to be the same for flow updates. */ 890 /* The unmasked key has to be the same for flow updates. */
891 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) { 891 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
892 error = -EEXIST; 892 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
893 goto err_unlock_ovs; 893 if (!flow) {
894 error = -ENOENT;
895 goto err_unlock_ovs;
896 }
894 } 897 }
895 /* Update actions. */ 898 /* Update actions. */
896 old_acts = ovsl_dereference(flow->sf_acts); 899 old_acts = ovsl_dereference(flow->sf_acts);
@@ -981,16 +984,12 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
981 goto err_unlock_ovs; 984 goto err_unlock_ovs;
982 } 985 }
983 /* Check that the flow exists. */ 986 /* Check that the flow exists. */
984 flow = ovs_flow_tbl_lookup(&dp->table, &key); 987 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
985 if (unlikely(!flow)) { 988 if (unlikely(!flow)) {
986 error = -ENOENT; 989 error = -ENOENT;
987 goto err_unlock_ovs; 990 goto err_unlock_ovs;
988 } 991 }
989 /* The unmasked key has to be the same for flow updates. */ 992
990 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
991 error = -EEXIST;
992 goto err_unlock_ovs;
993 }
994 /* Update actions, if present. */ 993 /* Update actions, if present. */
995 if (likely(acts)) { 994 if (likely(acts)) {
996 old_acts = ovsl_dereference(flow->sf_acts); 995 old_acts = ovsl_dereference(flow->sf_acts);
@@ -1063,8 +1062,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1063 goto unlock; 1062 goto unlock;
1064 } 1063 }
1065 1064
1066 flow = ovs_flow_tbl_lookup(&dp->table, &key); 1065 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1067 if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) { 1066 if (!flow) {
1068 err = -ENOENT; 1067 err = -ENOENT;
1069 goto unlock; 1068 goto unlock;
1070 } 1069 }
@@ -1113,8 +1112,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1113 goto unlock; 1112 goto unlock;
1114 } 1113 }
1115 1114
1116 flow = ovs_flow_tbl_lookup(&dp->table, &key); 1115 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1117 if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) { 1116 if (unlikely(!flow)) {
1118 err = -ENOENT; 1117 err = -ENOENT;
1119 goto unlock; 1118 goto unlock;
1120 } 1119 }
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 334751cb1528..d07ab538fc9d 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -61,10 +61,10 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
61 61
62#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF)) 62#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
63 63
64void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb) 64void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
65 struct sk_buff *skb)
65{ 66{
66 struct flow_stats *stats; 67 struct flow_stats *stats;
67 __be16 tcp_flags = flow->key.tp.flags;
68 int node = numa_node_id(); 68 int node = numa_node_id();
69 69
70 stats = rcu_dereference(flow->stats[node]); 70 stats = rcu_dereference(flow->stats[node]);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index ac395d2cd821..5e5aaed3a85b 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2013 Nicira, Inc. 2 * Copyright (c) 2007-2014 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
@@ -180,7 +180,8 @@ struct arp_eth_header {
180 unsigned char ar_tip[4]; /* target IP address */ 180 unsigned char ar_tip[4]; /* target IP address */
181} __packed; 181} __packed;
182 182
183void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *); 183void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
184 struct sk_buff *);
184void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *, 185void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
185 unsigned long *used, __be16 *tcp_flags); 186 unsigned long *used, __be16 *tcp_flags);
186void ovs_flow_stats_clear(struct sw_flow *); 187void ovs_flow_stats_clear(struct sw_flow *);
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 574c3abc9b30..cf2d853646f0 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -456,6 +456,22 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
456 return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit); 456 return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
457} 457}
458 458
459struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
460 struct sw_flow_match *match)
461{
462 struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
463 struct sw_flow_mask *mask;
464 struct sw_flow *flow;
465
466 /* Always called under ovs-mutex. */
467 list_for_each_entry(mask, &tbl->mask_list, list) {
468 flow = masked_flow_lookup(ti, match->key, mask);
469 if (flow && ovs_flow_cmp_unmasked_key(flow, match)) /* Found */
470 return flow;
471 }
472 return NULL;
473}
474
459int ovs_flow_tbl_num_masks(const struct flow_table *table) 475int ovs_flow_tbl_num_masks(const struct flow_table *table)
460{ 476{
461 struct sw_flow_mask *mask; 477 struct sw_flow_mask *mask;
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index ca8a5820f615..5918bff7f3f6 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -76,7 +76,8 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
76 u32 *n_mask_hit); 76 u32 *n_mask_hit);
77struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *, 77struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
78 const struct sw_flow_key *); 78 const struct sw_flow_key *);
79 79struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
80 struct sw_flow_match *match);
80bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, 81bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
81 struct sw_flow_match *match); 82 struct sw_flow_match *match);
82 83
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 35ec4fed09e2..f49148a07da2 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -110,6 +110,22 @@ static int gre_rcv(struct sk_buff *skb,
110 return PACKET_RCVD; 110 return PACKET_RCVD;
111} 111}
112 112
113/* Called with rcu_read_lock and BH disabled. */
114static int gre_err(struct sk_buff *skb, u32 info,
115 const struct tnl_ptk_info *tpi)
116{
117 struct ovs_net *ovs_net;
118 struct vport *vport;
119
120 ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
121 vport = rcu_dereference(ovs_net->vport_net.gre_vport);
122
123 if (unlikely(!vport))
124 return PACKET_REJECT;
125 else
126 return PACKET_RCVD;
127}
128
113static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) 129static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
114{ 130{
115 struct net *net = ovs_dp_get_net(vport->dp); 131 struct net *net = ovs_dp_get_net(vport->dp);
@@ -186,6 +202,7 @@ error:
186 202
187static struct gre_cisco_protocol gre_protocol = { 203static struct gre_cisco_protocol gre_protocol = {
188 .handler = gre_rcv, 204 .handler = gre_rcv,
205 .err_handler = gre_err,
189 .priority = 1, 206 .priority = 1,
190}; 207};
191 208
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index c39b583ace32..70c0be8d0121 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -38,6 +38,7 @@
38#include <linux/errno.h> 38#include <linux/errno.h>
39#include <linux/rtnetlink.h> 39#include <linux/rtnetlink.h>
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/bitmap.h>
41#include <net/netlink.h> 42#include <net/netlink.h>
42#include <net/act_api.h> 43#include <net/act_api.h>
43#include <net/pkt_cls.h> 44#include <net/pkt_cls.h>
@@ -460,17 +461,25 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
460 return 0; 461 return 0;
461} 462}
462 463
464#define NR_U32_NODE (1<<12)
463static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle) 465static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
464{ 466{
465 struct tc_u_knode *n; 467 struct tc_u_knode *n;
466 unsigned int i = 0x7FF; 468 unsigned long i;
469 unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
470 GFP_KERNEL);
471 if (!bitmap)
472 return handle | 0xFFF;
467 473
468 for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next) 474 for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
469 if (i < TC_U32_NODE(n->handle)) 475 set_bit(TC_U32_NODE(n->handle), bitmap);
470 i = TC_U32_NODE(n->handle);
471 i++;
472 476
473 return handle | (i > 0xFFF ? 0xFFF : i); 477 i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
478 if (i >= NR_U32_NODE)
479 i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
480
481 kfree(bitmap);
482 return handle | (i >= NR_U32_NODE ? 0xFFF : i);
474} 483}
475 484
476static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { 485static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 9de23a222d3f..06a9ee6b2d3a 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1097,6 +1097,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
1097 asoc->c = new->c; 1097 asoc->c = new->c;
1098 asoc->peer.rwnd = new->peer.rwnd; 1098 asoc->peer.rwnd = new->peer.rwnd;
1099 asoc->peer.sack_needed = new->peer.sack_needed; 1099 asoc->peer.sack_needed = new->peer.sack_needed;
1100 asoc->peer.auth_capable = new->peer.auth_capable;
1100 asoc->peer.i = new->peer.i; 1101 asoc->peer.i = new->peer.i;
1101 sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, 1102 sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
1102 asoc->peer.i.initial_tsn, GFP_ATOMIC); 1103 asoc->peer.i.initial_tsn, GFP_ATOMIC);
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 85c64658bd0b..b6842fdb53d4 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -366,9 +366,10 @@ fail:
366 * specification [SCTP] and any extensions for a list of possible 366 * specification [SCTP] and any extensions for a list of possible
367 * error formats. 367 * error formats.
368 */ 368 */
369struct sctp_ulpevent *sctp_ulpevent_make_remote_error( 369struct sctp_ulpevent *
370 const struct sctp_association *asoc, struct sctp_chunk *chunk, 370sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
371 __u16 flags, gfp_t gfp) 371 struct sctp_chunk *chunk, __u16 flags,
372 gfp_t gfp)
372{ 373{
373 struct sctp_ulpevent *event; 374 struct sctp_ulpevent *event;
374 struct sctp_remote_error *sre; 375 struct sctp_remote_error *sre;
@@ -387,8 +388,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
387 /* Copy the skb to a new skb with room for us to prepend 388 /* Copy the skb to a new skb with room for us to prepend
388 * notification with. 389 * notification with.
389 */ 390 */
390 skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error), 391 skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
391 0, gfp);
392 392
393 /* Pull off the rest of the cause TLV from the chunk. */ 393 /* Pull off the rest of the cause TLV from the chunk. */
394 skb_pull(chunk->skb, elen); 394 skb_pull(chunk->skb, elen);
@@ -399,62 +399,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
399 event = sctp_skb2event(skb); 399 event = sctp_skb2event(skb);
400 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); 400 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
401 401
402 sre = (struct sctp_remote_error *) 402 sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
403 skb_push(skb, sizeof(struct sctp_remote_error));
404 403
405 /* Trim the buffer to the right length. */ 404 /* Trim the buffer to the right length. */
406 skb_trim(skb, sizeof(struct sctp_remote_error) + elen); 405 skb_trim(skb, sizeof(*sre) + elen);
407 406
408 /* Socket Extensions for SCTP 407 /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
409 * 5.3.1.3 SCTP_REMOTE_ERROR 408 memset(sre, 0, sizeof(*sre));
410 *
411 * sre_type:
412 * It should be SCTP_REMOTE_ERROR.
413 */
414 sre->sre_type = SCTP_REMOTE_ERROR; 409 sre->sre_type = SCTP_REMOTE_ERROR;
415
416 /*
417 * Socket Extensions for SCTP
418 * 5.3.1.3 SCTP_REMOTE_ERROR
419 *
420 * sre_flags: 16 bits (unsigned integer)
421 * Currently unused.
422 */
423 sre->sre_flags = 0; 410 sre->sre_flags = 0;
424
425 /* Socket Extensions for SCTP
426 * 5.3.1.3 SCTP_REMOTE_ERROR
427 *
428 * sre_length: sizeof (__u32)
429 *
430 * This field is the total length of the notification data,
431 * including the notification header.
432 */
433 sre->sre_length = skb->len; 411 sre->sre_length = skb->len;
434
435 /* Socket Extensions for SCTP
436 * 5.3.1.3 SCTP_REMOTE_ERROR
437 *
438 * sre_error: 16 bits (unsigned integer)
439 * This value represents one of the Operational Error causes defined in
440 * the SCTP specification, in network byte order.
441 */
442 sre->sre_error = cause; 412 sre->sre_error = cause;
443
444 /* Socket Extensions for SCTP
445 * 5.3.1.3 SCTP_REMOTE_ERROR
446 *
447 * sre_assoc_id: sizeof (sctp_assoc_t)
448 *
449 * The association id field, holds the identifier for the association.
450 * All notifications for a given association have the same association
451 * identifier. For TCP style socket, this field is ignored.
452 */
453 sctp_ulpevent_set_owner(event, asoc); 413 sctp_ulpevent_set_owner(event, asoc);
454 sre->sre_assoc_id = sctp_assoc2id(asoc); 414 sre->sre_assoc_id = sctp_assoc2id(asoc);
455 415
456 return event; 416 return event;
457
458fail: 417fail:
459 return NULL; 418 return NULL;
460} 419}
@@ -899,7 +858,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
899 return notification->sn_header.sn_type; 858 return notification->sn_header.sn_type;
900} 859}
901 860
902/* Copy out the sndrcvinfo into a msghdr. */ 861/* RFC6458, Section 5.3.2. SCTP Header Information Structure
862 * (SCTP_SNDRCV, DEPRECATED)
863 */
903void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, 864void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
904 struct msghdr *msghdr) 865 struct msghdr *msghdr)
905{ 866{
@@ -908,74 +869,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
908 if (sctp_ulpevent_is_notification(event)) 869 if (sctp_ulpevent_is_notification(event))
909 return; 870 return;
910 871
911 /* Sockets API Extensions for SCTP 872 memset(&sinfo, 0, sizeof(sinfo));
912 * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
913 *
914 * sinfo_stream: 16 bits (unsigned integer)
915 *
916 * For recvmsg() the SCTP stack places the message's stream number in
917 * this value.
918 */
919 sinfo.sinfo_stream = event->stream; 873 sinfo.sinfo_stream = event->stream;
920 /* sinfo_ssn: 16 bits (unsigned integer)
921 *
922 * For recvmsg() this value contains the stream sequence number that
923 * the remote endpoint placed in the DATA chunk. For fragmented
924 * messages this is the same number for all deliveries of the message
925 * (if more than one recvmsg() is needed to read the message).
926 */
927 sinfo.sinfo_ssn = event->ssn; 874 sinfo.sinfo_ssn = event->ssn;
928 /* sinfo_ppid: 32 bits (unsigned integer)
929 *
930 * In recvmsg() this value is
931 * the same information that was passed by the upper layer in the peer
932 * application. Please note that byte order issues are NOT accounted
933 * for and this information is passed opaquely by the SCTP stack from
934 * one end to the other.
935 */
936 sinfo.sinfo_ppid = event->ppid; 875 sinfo.sinfo_ppid = event->ppid;
937 /* sinfo_flags: 16 bits (unsigned integer)
938 *
939 * This field may contain any of the following flags and is composed of
940 * a bitwise OR of these values.
941 *
942 * recvmsg() flags:
943 *
944 * SCTP_UNORDERED - This flag is present when the message was sent
945 * non-ordered.
946 */
947 sinfo.sinfo_flags = event->flags; 876 sinfo.sinfo_flags = event->flags;
948 /* sinfo_tsn: 32 bit (unsigned integer)
949 *
950 * For the receiving side, this field holds a TSN that was
951 * assigned to one of the SCTP Data Chunks.
952 */
953 sinfo.sinfo_tsn = event->tsn; 877 sinfo.sinfo_tsn = event->tsn;
954 /* sinfo_cumtsn: 32 bit (unsigned integer)
955 *
956 * This field will hold the current cumulative TSN as
957 * known by the underlying SCTP layer. Note this field is
958 * ignored when sending and only valid for a receive
959 * operation when sinfo_flags are set to SCTP_UNORDERED.
960 */
961 sinfo.sinfo_cumtsn = event->cumtsn; 878 sinfo.sinfo_cumtsn = event->cumtsn;
962 /* sinfo_assoc_id: sizeof (sctp_assoc_t)
963 *
964 * The association handle field, sinfo_assoc_id, holds the identifier
965 * for the association announced in the COMMUNICATION_UP notification.
966 * All notifications for a given association have the same identifier.
967 * Ignored for one-to-one style sockets.
968 */
969 sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc); 879 sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
970 880 /* Context value that is set via SCTP_CONTEXT socket option. */
971 /* context value that is set via SCTP_CONTEXT socket option. */
972 sinfo.sinfo_context = event->asoc->default_rcv_context; 881 sinfo.sinfo_context = event->asoc->default_rcv_context;
973
974 /* These fields are not used while receiving. */ 882 /* These fields are not used while receiving. */
975 sinfo.sinfo_timetolive = 0; 883 sinfo.sinfo_timetolive = 0;
976 884
977 put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV, 885 put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
978 sizeof(struct sctp_sndrcvinfo), (void *)&sinfo); 886 sizeof(sinfo), &sinfo);
979} 887}
980 888
981/* Do accounting for bytes received and hold a reference to the association 889/* Do accounting for bytes received and hold a reference to the association
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 26631679a1fa..55c6c9d3e1ce 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -559,6 +559,7 @@ receive:
559 559
560 buf = node->bclink.deferred_head; 560 buf = node->bclink.deferred_head;
561 node->bclink.deferred_head = buf->next; 561 node->bclink.deferred_head = buf->next;
562 buf->next = NULL;
562 node->bclink.deferred_size--; 563 node->bclink.deferred_size--;
563 goto receive; 564 goto receive;
564 } 565 }
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 8be6e94a1ca9..0a37a472c29f 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -101,9 +101,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
101} 101}
102 102
103/* tipc_buf_append(): Append a buffer to the fragment list of another buffer 103/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
104 * Let first buffer become head buffer 104 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
105 * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0 105 * out: set when successful non-complete reassembly, otherwise NULL
106 * Leaves headbuf pointer at NULL if failure 106 * @*buf: in: the buffer to append. Always defined
107 * out: head buf after sucessful complete reassembly, otherwise NULL
108 * Returns 1 when reassembly complete, otherwise 0
107 */ 109 */
108int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) 110int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
109{ 111{
@@ -122,6 +124,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
122 goto out_free; 124 goto out_free;
123 head = *headbuf = frag; 125 head = *headbuf = frag;
124 skb_frag_list_init(head); 126 skb_frag_list_init(head);
127 *buf = NULL;
125 return 0; 128 return 0;
126 } 129 }
127 if (!head) 130 if (!head)
@@ -150,5 +153,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
150out_free: 153out_free:
151 pr_warn_ratelimited("Unable to build fragment list\n"); 154 pr_warn_ratelimited("Unable to build fragment list\n");
152 kfree_skb(*buf); 155 kfree_skb(*buf);
156 kfree_skb(*headbuf);
157 *buf = *headbuf = NULL;
153 return 0; 158 return 0;
154} 159}
diff --git a/net/wireless/core.h b/net/wireless/core.h
index e9afbf10e756..7e3a3cef7df9 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -424,7 +424,7 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
424 if (end >= start) 424 if (end >= start)
425 return jiffies_to_msecs(end - start); 425 return jiffies_to_msecs(end - start);
426 426
427 return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1); 427 return jiffies_to_msecs(end + (ULONG_MAX - start) + 1);
428} 428}
429 429
430void 430void
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ba4f1723c83a..6668daf69326 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1497,18 +1497,17 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
1497 } 1497 }
1498 CMD(start_p2p_device, START_P2P_DEVICE); 1498 CMD(start_p2p_device, START_P2P_DEVICE);
1499 CMD(set_mcast_rate, SET_MCAST_RATE); 1499 CMD(set_mcast_rate, SET_MCAST_RATE);
1500#ifdef CONFIG_NL80211_TESTMODE
1501 CMD(testmode_cmd, TESTMODE);
1502#endif
1500 if (state->split) { 1503 if (state->split) {
1501 CMD(crit_proto_start, CRIT_PROTOCOL_START); 1504 CMD(crit_proto_start, CRIT_PROTOCOL_START);
1502 CMD(crit_proto_stop, CRIT_PROTOCOL_STOP); 1505 CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
1503 if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH) 1506 if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
1504 CMD(channel_switch, CHANNEL_SWITCH); 1507 CMD(channel_switch, CHANNEL_SWITCH);
1508 CMD(set_qos_map, SET_QOS_MAP);
1505 } 1509 }
1506 CMD(set_qos_map, SET_QOS_MAP); 1510 /* add into the if now */
1507
1508#ifdef CONFIG_NL80211_TESTMODE
1509 CMD(testmode_cmd, TESTMODE);
1510#endif
1511
1512#undef CMD 1511#undef CMD
1513 1512
1514 if (rdev->ops->connect || rdev->ops->auth) { 1513 if (rdev->ops->connect || rdev->ops->auth) {
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 558b0e3a02d8..1afdf45db38f 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -935,7 +935,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
935 if (!band_rule_found) 935 if (!band_rule_found)
936 band_rule_found = freq_in_rule_band(fr, center_freq); 936 band_rule_found = freq_in_rule_band(fr, center_freq);
937 937
938 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(5)); 938 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
939 939
940 if (band_rule_found && bw_fits) 940 if (band_rule_found && bw_fits)
941 return rr; 941 return rr;
@@ -1019,10 +1019,10 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
1019} 1019}
1020#endif 1020#endif
1021 1021
1022/* Find an ieee80211_reg_rule such that a 5MHz channel with frequency 1022/*
1023 * chan->center_freq fits there. 1023 * Note that right now we assume the desired channel bandwidth
1024 * If there is no such reg_rule, disable the channel, otherwise set the 1024 * is always 20 MHz for each individual channel (HT40 uses 20 MHz
1025 * flags corresponding to the bandwidths allowed in the particular reg_rule 1025 * per channel, the primary and the extension channel).
1026 */ 1026 */
1027static void handle_channel(struct wiphy *wiphy, 1027static void handle_channel(struct wiphy *wiphy,
1028 enum nl80211_reg_initiator initiator, 1028 enum nl80211_reg_initiator initiator,
@@ -1083,12 +1083,8 @@ static void handle_channel(struct wiphy *wiphy,
1083 if (reg_rule->flags & NL80211_RRF_AUTO_BW) 1083 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1084 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); 1084 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1085 1085
1086 if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1087 bw_flags = IEEE80211_CHAN_NO_10MHZ;
1088 if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1089 bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1090 if (max_bandwidth_khz < MHZ_TO_KHZ(40)) 1086 if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1091 bw_flags |= IEEE80211_CHAN_NO_HT40; 1087 bw_flags = IEEE80211_CHAN_NO_HT40;
1092 if (max_bandwidth_khz < MHZ_TO_KHZ(80)) 1088 if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1093 bw_flags |= IEEE80211_CHAN_NO_80MHZ; 1089 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1094 if (max_bandwidth_khz < MHZ_TO_KHZ(160)) 1090 if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1522,12 +1518,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
1522 if (reg_rule->flags & NL80211_RRF_AUTO_BW) 1518 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1523 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); 1519 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1524 1520
1525 if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1526 bw_flags = IEEE80211_CHAN_NO_10MHZ;
1527 if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1528 bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1529 if (max_bandwidth_khz < MHZ_TO_KHZ(40)) 1521 if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1530 bw_flags |= IEEE80211_CHAN_NO_HT40; 1522 bw_flags = IEEE80211_CHAN_NO_HT40;
1531 if (max_bandwidth_khz < MHZ_TO_KHZ(80)) 1523 if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1532 bw_flags |= IEEE80211_CHAN_NO_80MHZ; 1524 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1533 if (max_bandwidth_khz < MHZ_TO_KHZ(160)) 1525 if (max_bandwidth_khz < MHZ_TO_KHZ(160))
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 560ed77084e9..7cc887f9da11 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2094,7 +2094,8 @@ TRACE_EVENT(cfg80211_michael_mic_failure,
2094 MAC_ASSIGN(addr, addr); 2094 MAC_ASSIGN(addr, addr);
2095 __entry->key_type = key_type; 2095 __entry->key_type = key_type;
2096 __entry->key_id = key_id; 2096 __entry->key_id = key_id;
2097 memcpy(__entry->tsc, tsc, 6); 2097 if (tsc)
2098 memcpy(__entry->tsc, tsc, 6);
2098 ), 2099 ),
2099 TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm", 2100 TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
2100 NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type, 2101 NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index a8ef5108e0d8..0525d78ba328 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2097,6 +2097,8 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2097 goto no_transform; 2097 goto no_transform;
2098 } 2098 }
2099 2099
2100 dst_hold(&xdst->u.dst);
2101 xdst->u.dst.flags |= DST_NOCACHE;
2100 route = xdst->route; 2102 route = xdst->route;
2101 } 2103 }
2102 } 2104 }
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 412d9dc3a873..d4db6ebb089d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -177,9 +177,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
177 attrs[XFRMA_ALG_AEAD] || 177 attrs[XFRMA_ALG_AEAD] ||
178 attrs[XFRMA_ALG_CRYPT] || 178 attrs[XFRMA_ALG_CRYPT] ||
179 attrs[XFRMA_ALG_COMP] || 179 attrs[XFRMA_ALG_COMP] ||
180 attrs[XFRMA_TFCPAD] || 180 attrs[XFRMA_TFCPAD])
181 (ntohl(p->id.spi) >= 0x10000))
182
183 goto out; 181 goto out;
184 break; 182 break;
185 183
@@ -207,7 +205,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
207 attrs[XFRMA_ALG_AUTH] || 205 attrs[XFRMA_ALG_AUTH] ||
208 attrs[XFRMA_ALG_AUTH_TRUNC] || 206 attrs[XFRMA_ALG_AUTH_TRUNC] ||
209 attrs[XFRMA_ALG_CRYPT] || 207 attrs[XFRMA_ALG_CRYPT] ||
210 attrs[XFRMA_TFCPAD]) 208 attrs[XFRMA_TFCPAD] ||
209 (ntohl(p->id.spi) >= 0x10000))
211 goto out; 210 goto out;
212 break; 211 break;
213 212
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index 6af50eb80ea7..70faa3a32526 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -379,11 +379,11 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
379 struct special_params *params = bebob->maudio_special_quirk; 379 struct special_params *params = bebob->maudio_special_quirk;
380 int err, id; 380 int err, id;
381 381
382 mutex_lock(&bebob->mutex);
383
384 id = uval->value.enumerated.item[0]; 382 id = uval->value.enumerated.item[0];
385 if (id >= ARRAY_SIZE(special_clk_labels)) 383 if (id >= ARRAY_SIZE(special_clk_labels))
386 return 0; 384 return -EINVAL;
385
386 mutex_lock(&bebob->mutex);
387 387
388 err = avc_maudio_set_special_clk(bebob, id, 388 err = avc_maudio_set_special_clk(bebob, id,
389 params->dig_in_fmt, 389 params->dig_in_fmt,
@@ -391,7 +391,10 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
391 params->clk_lock); 391 params->clk_lock);
392 mutex_unlock(&bebob->mutex); 392 mutex_unlock(&bebob->mutex);
393 393
394 return err >= 0; 394 if (err >= 0)
395 err = 1;
396
397 return err;
395} 398}
396static struct snd_kcontrol_new special_clk_ctl = { 399static struct snd_kcontrol_new special_clk_ctl = {
397 .name = "Clock Source", 400 .name = "Clock Source",
@@ -434,8 +437,8 @@ static struct snd_kcontrol_new special_sync_ctl = {
434 .get = special_sync_ctl_get, 437 .get = special_sync_ctl_get,
435}; 438};
436 439
437/* Digital interface control for special firmware */ 440/* Digital input interface control for special firmware */
438static char *const special_dig_iface_labels[] = { 441static char *const special_dig_in_iface_labels[] = {
439 "S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical" 442 "S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
440}; 443};
441static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl, 444static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
@@ -443,13 +446,13 @@ static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
443{ 446{
444 einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; 447 einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
445 einf->count = 1; 448 einf->count = 1;
446 einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels); 449 einf->value.enumerated.items = ARRAY_SIZE(special_dig_in_iface_labels);
447 450
448 if (einf->value.enumerated.item >= einf->value.enumerated.items) 451 if (einf->value.enumerated.item >= einf->value.enumerated.items)
449 einf->value.enumerated.item = einf->value.enumerated.items - 1; 452 einf->value.enumerated.item = einf->value.enumerated.items - 1;
450 453
451 strcpy(einf->value.enumerated.name, 454 strcpy(einf->value.enumerated.name,
452 special_dig_iface_labels[einf->value.enumerated.item]); 455 special_dig_in_iface_labels[einf->value.enumerated.item]);
453 456
454 return 0; 457 return 0;
455} 458}
@@ -491,26 +494,36 @@ static int special_dig_in_iface_ctl_set(struct snd_kcontrol *kctl,
491 unsigned int id, dig_in_fmt, dig_in_iface; 494 unsigned int id, dig_in_fmt, dig_in_iface;
492 int err; 495 int err;
493 496
494 mutex_lock(&bebob->mutex);
495
496 id = uval->value.enumerated.item[0]; 497 id = uval->value.enumerated.item[0];
498 if (id >= ARRAY_SIZE(special_dig_in_iface_labels))
499 return -EINVAL;
497 500
498 /* decode user value */ 501 /* decode user value */
499 dig_in_fmt = (id >> 1) & 0x01; 502 dig_in_fmt = (id >> 1) & 0x01;
500 dig_in_iface = id & 0x01; 503 dig_in_iface = id & 0x01;
501 504
505 mutex_lock(&bebob->mutex);
506
502 err = avc_maudio_set_special_clk(bebob, 507 err = avc_maudio_set_special_clk(bebob,
503 params->clk_src, 508 params->clk_src,
504 dig_in_fmt, 509 dig_in_fmt,
505 params->dig_out_fmt, 510 params->dig_out_fmt,
506 params->clk_lock); 511 params->clk_lock);
507 if ((err < 0) || (params->dig_in_fmt > 0)) /* ADAT */ 512 if (err < 0)
513 goto end;
514
515 /* For ADAT, optical interface is only available. */
516 if (params->dig_in_fmt > 0) {
517 err = 1;
508 goto end; 518 goto end;
519 }
509 520
521 /* For S/PDIF, optical/coaxial interfaces are selectable. */
510 err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface); 522 err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface);
511 if (err < 0) 523 if (err < 0)
512 dev_err(&bebob->unit->device, 524 dev_err(&bebob->unit->device,
513 "fail to set digital input interface: %d\n", err); 525 "fail to set digital input interface: %d\n", err);
526 err = 1;
514end: 527end:
515 special_stream_formation_set(bebob); 528 special_stream_formation_set(bebob);
516 mutex_unlock(&bebob->mutex); 529 mutex_unlock(&bebob->mutex);
@@ -525,18 +538,22 @@ static struct snd_kcontrol_new special_dig_in_iface_ctl = {
525 .put = special_dig_in_iface_ctl_set 538 .put = special_dig_in_iface_ctl_set
526}; 539};
527 540
541/* Digital output interface control for special firmware */
542static char *const special_dig_out_iface_labels[] = {
543 "S/PDIF Optical and Coaxial", "ADAT Optical"
544};
528static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl, 545static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl,
529 struct snd_ctl_elem_info *einf) 546 struct snd_ctl_elem_info *einf)
530{ 547{
531 einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; 548 einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
532 einf->count = 1; 549 einf->count = 1;
533 einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels) - 1; 550 einf->value.enumerated.items = ARRAY_SIZE(special_dig_out_iface_labels);
534 551
535 if (einf->value.enumerated.item >= einf->value.enumerated.items) 552 if (einf->value.enumerated.item >= einf->value.enumerated.items)
536 einf->value.enumerated.item = einf->value.enumerated.items - 1; 553 einf->value.enumerated.item = einf->value.enumerated.items - 1;
537 554
538 strcpy(einf->value.enumerated.name, 555 strcpy(einf->value.enumerated.name,
539 special_dig_iface_labels[einf->value.enumerated.item + 1]); 556 special_dig_out_iface_labels[einf->value.enumerated.item]);
540 557
541 return 0; 558 return 0;
542} 559}
@@ -558,16 +575,20 @@ static int special_dig_out_iface_ctl_set(struct snd_kcontrol *kctl,
558 unsigned int id; 575 unsigned int id;
559 int err; 576 int err;
560 577
561 mutex_lock(&bebob->mutex);
562
563 id = uval->value.enumerated.item[0]; 578 id = uval->value.enumerated.item[0];
579 if (id >= ARRAY_SIZE(special_dig_out_iface_labels))
580 return -EINVAL;
581
582 mutex_lock(&bebob->mutex);
564 583
565 err = avc_maudio_set_special_clk(bebob, 584 err = avc_maudio_set_special_clk(bebob,
566 params->clk_src, 585 params->clk_src,
567 params->dig_in_fmt, 586 params->dig_in_fmt,
568 id, params->clk_lock); 587 id, params->clk_lock);
569 if (err >= 0) 588 if (err >= 0) {
570 special_stream_formation_set(bebob); 589 special_stream_formation_set(bebob);
590 err = 1;
591 }
571 592
572 mutex_unlock(&bebob->mutex); 593 mutex_unlock(&bebob->mutex);
573 return err; 594 return err;
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 480bbddbd801..6df04d91c93c 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -193,7 +193,8 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
193 dsp_unlock(azx_dev); 193 dsp_unlock(azx_dev);
194 return azx_dev; 194 return azx_dev;
195 } 195 }
196 if (!res) 196 if (!res ||
197 (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
197 res = azx_dev; 198 res = azx_dev;
198 } 199 }
199 dsp_unlock(azx_dev); 200 dsp_unlock(azx_dev);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b6b4e71a0b0b..83cd19017cf3 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -227,7 +227,7 @@ enum {
227/* quirks for Intel PCH */ 227/* quirks for Intel PCH */
228#define AZX_DCAPS_INTEL_PCH_NOPM \ 228#define AZX_DCAPS_INTEL_PCH_NOPM \
229 (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \ 229 (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
230 AZX_DCAPS_COUNT_LPIB_DELAY) 230 AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_REVERSE_ASSIGN)
231 231
232#define AZX_DCAPS_INTEL_PCH \ 232#define AZX_DCAPS_INTEL_PCH \
233 (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME) 233 (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
@@ -596,7 +596,7 @@ static int azx_suspend(struct device *dev)
596 struct azx *chip = card->private_data; 596 struct azx *chip = card->private_data;
597 struct azx_pcm *p; 597 struct azx_pcm *p;
598 598
599 if (chip->disabled) 599 if (chip->disabled || chip->init_failed)
600 return 0; 600 return 0;
601 601
602 snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); 602 snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -628,7 +628,7 @@ static int azx_resume(struct device *dev)
628 struct snd_card *card = dev_get_drvdata(dev); 628 struct snd_card *card = dev_get_drvdata(dev);
629 struct azx *chip = card->private_data; 629 struct azx *chip = card->private_data;
630 630
631 if (chip->disabled) 631 if (chip->disabled || chip->init_failed)
632 return 0; 632 return 0;
633 633
634 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { 634 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
@@ -665,7 +665,7 @@ static int azx_runtime_suspend(struct device *dev)
665 struct snd_card *card = dev_get_drvdata(dev); 665 struct snd_card *card = dev_get_drvdata(dev);
666 struct azx *chip = card->private_data; 666 struct azx *chip = card->private_data;
667 667
668 if (chip->disabled) 668 if (chip->disabled || chip->init_failed)
669 return 0; 669 return 0;
670 670
671 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) 671 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -692,7 +692,7 @@ static int azx_runtime_resume(struct device *dev)
692 struct hda_codec *codec; 692 struct hda_codec *codec;
693 int status; 693 int status;
694 694
695 if (chip->disabled) 695 if (chip->disabled || chip->init_failed)
696 return 0; 696 return 0;
697 697
698 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) 698 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -729,7 +729,7 @@ static int azx_runtime_idle(struct device *dev)
729 struct snd_card *card = dev_get_drvdata(dev); 729 struct snd_card *card = dev_get_drvdata(dev);
730 struct azx *chip = card->private_data; 730 struct azx *chip = card->private_data;
731 731
732 if (chip->disabled) 732 if (chip->disabled || chip->init_failed)
733 return 0; 733 return 0;
734 734
735 if (!power_save_controller || 735 if (!power_save_controller ||
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index 4a7cb01fa912..e9d1a5762a55 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -186,6 +186,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
186#define AZX_DCAPS_BUFSIZE (1 << 21) /* no buffer size alignment */ 186#define AZX_DCAPS_BUFSIZE (1 << 21) /* no buffer size alignment */
187#define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */ 187#define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */
188#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */ 188#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */
189#define AZX_DCAPS_REVERSE_ASSIGN (1 << 24) /* Assign devices in reverse order */
189#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ 190#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
190#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ 191#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
191#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */ 192#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index a366ba9293a8..358414da6418 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -236,6 +236,7 @@ disable_hda:
236 return rc; 236 return rc;
237} 237}
238 238
239#ifdef CONFIG_PM_SLEEP
239static void hda_tegra_disable_clocks(struct hda_tegra *data) 240static void hda_tegra_disable_clocks(struct hda_tegra *data)
240{ 241{
241 clk_disable_unprepare(data->hda2hdmi_clk); 242 clk_disable_unprepare(data->hda2hdmi_clk);
@@ -243,7 +244,6 @@ static void hda_tegra_disable_clocks(struct hda_tegra *data)
243 clk_disable_unprepare(data->hda_clk); 244 clk_disable_unprepare(data->hda_clk);
244} 245}
245 246
246#ifdef CONFIG_PM_SLEEP
247/* 247/*
248 * power management 248 * power management
249 */ 249 */
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 4fe876b65fda..ba4ca52072ff 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3337,6 +3337,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
3337{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_nvhdmi }, 3337{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_nvhdmi },
3338{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_nvhdmi }, 3338{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_nvhdmi },
3339{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, 3339{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
3340{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi },
3340{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, 3341{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
3341{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, 3342{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
3342{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3343{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -3394,6 +3395,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0044");
3394MODULE_ALIAS("snd-hda-codec-id:10de0051"); 3395MODULE_ALIAS("snd-hda-codec-id:10de0051");
3395MODULE_ALIAS("snd-hda-codec-id:10de0060"); 3396MODULE_ALIAS("snd-hda-codec-id:10de0060");
3396MODULE_ALIAS("snd-hda-codec-id:10de0067"); 3397MODULE_ALIAS("snd-hda-codec-id:10de0067");
3398MODULE_ALIAS("snd-hda-codec-id:10de0070");
3397MODULE_ALIAS("snd-hda-codec-id:10de0071"); 3399MODULE_ALIAS("snd-hda-codec-id:10de0071");
3398MODULE_ALIAS("snd-hda-codec-id:10de8001"); 3400MODULE_ALIAS("snd-hda-codec-id:10de8001");
3399MODULE_ALIAS("snd-hda-codec-id:11069f80"); 3401MODULE_ALIAS("snd-hda-codec-id:11069f80");
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
index c342f7087147..ee53a42818ca 100644
--- a/tools/lib/lockdep/include/liblockdep/mutex.h
+++ b/tools/lib/lockdep/include/liblockdep/mutex.h
@@ -35,7 +35,7 @@ static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
35 35
36static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock) 36static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
37{ 37{
38 lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 38 lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
39 return pthread_mutex_lock(&lock->mutex); 39 return pthread_mutex_lock(&lock->mutex);
40} 40}
41 41
@@ -47,7 +47,7 @@ static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lo
47 47
48static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock) 48static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
49{ 49{
50 lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_); 50 lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
51 return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0; 51 return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
52} 52}
53 53
diff --git a/tools/lib/lockdep/include/liblockdep/rwlock.h b/tools/lib/lockdep/include/liblockdep/rwlock.h
index a680ab8c2e36..4ec03f861551 100644
--- a/tools/lib/lockdep/include/liblockdep/rwlock.h
+++ b/tools/lib/lockdep/include/liblockdep/rwlock.h
@@ -36,7 +36,7 @@ static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock,
36 36
37static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock) 37static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
38{ 38{
39 lock_acquire(&lock->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_); 39 lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
40 return pthread_rwlock_rdlock(&lock->rwlock); 40 return pthread_rwlock_rdlock(&lock->rwlock);
41 41
42} 42}
@@ -49,19 +49,19 @@ static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *
49 49
50static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock) 50static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock)
51{ 51{
52 lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 52 lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
53 return pthread_rwlock_wrlock(&lock->rwlock); 53 return pthread_rwlock_wrlock(&lock->rwlock);
54} 54}
55 55
56static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock) 56static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock)
57{ 57{
58 lock_acquire(&lock->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_); 58 lock_acquire(&lock->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
59 return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0; 59 return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
60} 60}
61 61
62static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock) 62static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock)
63{ 63{
64 lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_); 64 lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
65 return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0; 65 return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0;
66} 66}
67 67
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 23bd69cb5ade..6f803609e498 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -92,7 +92,7 @@ enum { none, prepare, done, } __init_state;
92static void init_preload(void); 92static void init_preload(void);
93static void try_init_preload(void) 93static void try_init_preload(void)
94{ 94{
95 if (!__init_state != done) 95 if (__init_state != done)
96 init_preload(); 96 init_preload();
97} 97}
98 98
@@ -252,7 +252,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
252 252
253 try_init_preload(); 253 try_init_preload();
254 254
255 lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL, 255 lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
256 (unsigned long)_RET_IP_); 256 (unsigned long)_RET_IP_);
257 /* 257 /*
258 * Here's the thing with pthread mutexes: unlike the kernel variant, 258 * Here's the thing with pthread mutexes: unlike the kernel variant,
@@ -281,7 +281,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
281 281
282 try_init_preload(); 282 try_init_preload();
283 283
284 lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_); 284 lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
285 r = ll_pthread_mutex_trylock(mutex); 285 r = ll_pthread_mutex_trylock(mutex);
286 if (r) 286 if (r)
287 lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_); 287 lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -303,7 +303,7 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex)
303 */ 303 */
304 r = ll_pthread_mutex_unlock(mutex); 304 r = ll_pthread_mutex_unlock(mutex);
305 if (r) 305 if (r)
306 lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 306 lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
307 307
308 return r; 308 return r;
309} 309}
@@ -352,7 +352,7 @@ int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
352 352
353 init_preload(); 353 init_preload();
354 354
355 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_); 355 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
356 r = ll_pthread_rwlock_rdlock(rwlock); 356 r = ll_pthread_rwlock_rdlock(rwlock);
357 if (r) 357 if (r)
358 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 358 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -366,7 +366,7 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
366 366
367 init_preload(); 367 init_preload();
368 368
369 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_); 369 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
370 r = ll_pthread_rwlock_tryrdlock(rwlock); 370 r = ll_pthread_rwlock_tryrdlock(rwlock);
371 if (r) 371 if (r)
372 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 372 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -380,7 +380,7 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
380 380
381 init_preload(); 381 init_preload();
382 382
383 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_); 383 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
384 r = ll_pthread_rwlock_trywrlock(rwlock); 384 r = ll_pthread_rwlock_trywrlock(rwlock);
385 if (r) 385 if (r)
386 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 386 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -394,7 +394,7 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
394 394
395 init_preload(); 395 init_preload();
396 396
397 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 397 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
398 r = ll_pthread_rwlock_wrlock(rwlock); 398 r = ll_pthread_rwlock_wrlock(rwlock);
399 if (r) 399 if (r)
400 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 400 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -411,7 +411,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
411 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 411 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
412 r = ll_pthread_rwlock_unlock(rwlock); 412 r = ll_pthread_rwlock_unlock(rwlock);
413 if (r) 413 if (r)
414 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 414 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
415 415
416 return r; 416 return r;
417} 417}
@@ -439,8 +439,6 @@ __attribute__((constructor)) static void init_preload(void)
439 ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock"); 439 ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
440#endif 440#endif
441 441
442 printf("%p\n", ll_pthread_mutex_trylock);fflush(stdout);
443
444 lockdep_init(); 442 lockdep_init();
445 443
446 __init_state = done; 444 __init_state = done;
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 52c03fbbba17..04a229aa5c0f 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -17,6 +17,7 @@
17#include "../util.h" 17#include "../util.h"
18#include "../ui.h" 18#include "../ui.h"
19#include "map.h" 19#include "map.h"
20#include "annotate.h"
20 21
21struct hist_browser { 22struct hist_browser {
22 struct ui_browser b; 23 struct ui_browser b;
@@ -1593,13 +1594,18 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1593 bi->to.sym->name) > 0) 1594 bi->to.sym->name) > 0)
1594 annotate_t = nr_options++; 1595 annotate_t = nr_options++;
1595 } else { 1596 } else {
1596
1597 if (browser->selection != NULL && 1597 if (browser->selection != NULL &&
1598 browser->selection->sym != NULL && 1598 browser->selection->sym != NULL &&
1599 !browser->selection->map->dso->annotate_warned && 1599 !browser->selection->map->dso->annotate_warned) {
1600 asprintf(&options[nr_options], "Annotate %s", 1600 struct annotation *notes;
1601 browser->selection->sym->name) > 0) 1601
1602 annotate = nr_options++; 1602 notes = symbol__annotation(browser->selection->sym);
1603
1604 if (notes->src &&
1605 asprintf(&options[nr_options], "Annotate %s",
1606 browser->selection->sym->name) > 0)
1607 annotate = nr_options++;
1608 }
1603 } 1609 }
1604 1610
1605 if (thread != NULL && 1611 if (thread != NULL &&
@@ -1656,6 +1662,7 @@ retry_popup_menu:
1656 1662
1657 if (choice == annotate || choice == annotate_t || choice == annotate_f) { 1663 if (choice == annotate || choice == annotate_t || choice == annotate_f) {
1658 struct hist_entry *he; 1664 struct hist_entry *he;
1665 struct annotation *notes;
1659 int err; 1666 int err;
1660do_annotate: 1667do_annotate:
1661 if (!objdump_path && perf_session_env__lookup_objdump(env)) 1668 if (!objdump_path && perf_session_env__lookup_objdump(env))
@@ -1679,6 +1686,10 @@ do_annotate:
1679 he->ms.map = he->branch_info->to.map; 1686 he->ms.map = he->branch_info->to.map;
1680 } 1687 }
1681 1688
1689 notes = symbol__annotation(he->ms.sym);
1690 if (!notes->src)
1691 continue;
1692
1682 /* 1693 /*
1683 * Don't let this be freed, say, by hists__decay_entry. 1694 * Don't let this be freed, say, by hists__decay_entry.
1684 */ 1695 */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 0e5fea95d596..c73e1fc12e53 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -496,18 +496,6 @@ struct process_args {
496 u64 start; 496 u64 start;
497}; 497};
498 498
499static int symbol__in_kernel(void *arg, const char *name,
500 char type __maybe_unused, u64 start)
501{
502 struct process_args *args = arg;
503
504 if (strchr(name, '['))
505 return 0;
506
507 args->start = start;
508 return 1;
509}
510
511static void machine__get_kallsyms_filename(struct machine *machine, char *buf, 499static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
512 size_t bufsz) 500 size_t bufsz)
513{ 501{
@@ -517,27 +505,41 @@ static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
517 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir); 505 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
518} 506}
519 507
520/* Figure out the start address of kernel map from /proc/kallsyms */ 508const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
521static u64 machine__get_kernel_start_addr(struct machine *machine) 509
510/* Figure out the start address of kernel map from /proc/kallsyms.
511 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
512 * symbol_name if it's not that important.
513 */
514static u64 machine__get_kernel_start_addr(struct machine *machine,
515 const char **symbol_name)
522{ 516{
523 char filename[PATH_MAX]; 517 char filename[PATH_MAX];
524 struct process_args args; 518 int i;
519 const char *name;
520 u64 addr = 0;
525 521
526 machine__get_kallsyms_filename(machine, filename, PATH_MAX); 522 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
527 523
528 if (symbol__restricted_filename(filename, "/proc/kallsyms")) 524 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
529 return 0; 525 return 0;
530 526
531 if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0) 527 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
532 return 0; 528 addr = kallsyms__get_function_start(filename, name);
529 if (addr)
530 break;
531 }
532
533 if (symbol_name)
534 *symbol_name = name;
533 535
534 return args.start; 536 return addr;
535} 537}
536 538
537int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) 539int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
538{ 540{
539 enum map_type type; 541 enum map_type type;
540 u64 start = machine__get_kernel_start_addr(machine); 542 u64 start = machine__get_kernel_start_addr(machine, NULL);
541 543
542 for (type = 0; type < MAP__NR_TYPES; ++type) { 544 for (type = 0; type < MAP__NR_TYPES; ++type) {
543 struct kmap *kmap; 545 struct kmap *kmap;
@@ -852,23 +854,11 @@ static int machine__create_modules(struct machine *machine)
852 return 0; 854 return 0;
853} 855}
854 856
855const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
856
857int machine__create_kernel_maps(struct machine *machine) 857int machine__create_kernel_maps(struct machine *machine)
858{ 858{
859 struct dso *kernel = machine__get_kernel(machine); 859 struct dso *kernel = machine__get_kernel(machine);
860 char filename[PATH_MAX];
861 const char *name; 860 const char *name;
862 u64 addr = 0; 861 u64 addr = machine__get_kernel_start_addr(machine, &name);
863 int i;
864
865 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
866
867 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
868 addr = kallsyms__get_function_start(filename, name);
869 if (addr)
870 break;
871 }
872 if (!addr) 862 if (!addr)
873 return -1; 863 return -1;
874 864
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 56ff9bebb577..476d3bf540a8 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1526,17 +1526,33 @@ int kvm_vgic_hyp_init(void)
1526 goto out_unmap; 1526 goto out_unmap;
1527 } 1527 }
1528 1528
1529 kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
1530 vctrl_res.start, vgic_maint_irq);
1531 on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
1532
1533 if (of_address_to_resource(vgic_node, 3, &vcpu_res)) { 1529 if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
1534 kvm_err("Cannot obtain VCPU resource\n"); 1530 kvm_err("Cannot obtain VCPU resource\n");
1535 ret = -ENXIO; 1531 ret = -ENXIO;
1536 goto out_unmap; 1532 goto out_unmap;
1537 } 1533 }
1534
1535 if (!PAGE_ALIGNED(vcpu_res.start)) {
1536 kvm_err("GICV physical address 0x%llx not page aligned\n",
1537 (unsigned long long)vcpu_res.start);
1538 ret = -ENXIO;
1539 goto out_unmap;
1540 }
1541
1542 if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
1543 kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
1544 (unsigned long long)resource_size(&vcpu_res),
1545 PAGE_SIZE);
1546 ret = -ENXIO;
1547 goto out_unmap;
1548 }
1549
1538 vgic_vcpu_base = vcpu_res.start; 1550 vgic_vcpu_base = vcpu_res.start;
1539 1551
1552 kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
1553 vctrl_res.start, vgic_maint_irq);
1554 on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
1555
1540 goto out; 1556 goto out;
1541 1557
1542out_unmap: 1558out_unmap: