-rw-r--r--  .mailmap | 1
-rw-r--r--  Documentation/arm/OMAP/README | 7
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/am57xx-beagle-x15.dts | 3
-rw-r--r--  arch/arm/boot/dts/armada-385-db-ap.dts | 2
-rw-r--r--  arch/arm/boot/dts/berlin2q.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/exynos5420-peach-pit.dts | 5
-rw-r--r--  arch/arm/boot/dts/exynos5800-peach-pi.dts | 5
-rw-r--r--  arch/arm/boot/dts/imx7d.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts | 2
-rw-r--r--  arch/arm/boot/dts/meson.dtsi | 23
-rw-r--r--  arch/arm/boot/dts/omap3-evm-37xx.dts | 2
-rw-r--r--  arch/arm/boot/dts/ste-hrefv60plus.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/tegra114.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/tegra124.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/tegra20.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/tegra30.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts | 2
-rw-r--r--  arch/arm/kvm/Kconfig | 1
-rw-r--r--  arch/arm/kvm/arm.c | 2
-rw-r--r--  arch/arm/mach-exynos/pm_domains.c | 8
-rw-r--r--  arch/arm/mach-omap2/Kconfig | 2
-rw-r--r--  arch/arm/mach-omap2/board-generic.c | 10
-rw-r--r--  arch/arm/mach-omap2/pdata-quirks.c | 9
-rw-r--r--  arch/arm/mach-pxa/pxa3xx.c | 9
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 1
-rw-r--r--  arch/arm/plat-orion/common.c | 2
-rw-r--r--  arch/powerpc/include/asm/cache.h | 7
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 2
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 17
-rw-r--r--  arch/powerpc/include/asm/reg.h | 1
-rw-r--r--  arch/powerpc/kernel/rtas.c | 3
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 55
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 29
-rw-r--r--  arch/um/Makefile | 4
-rw-r--r--  arch/um/kernel/trap.c | 2
-rw-r--r--  arch/um/os-Linux/helper.c | 6
-rw-r--r--  arch/x86/boot/compressed/eboot.c | 8
-rw-r--r--  arch/x86/include/asm/string_64.h | 5
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 4
-rw-r--r--  arch/x86/kernel/process.c | 6
-rw-r--r--  arch/x86/kernel/setup.c | 8
-rw-r--r--  arch/x86/kernel/smpboot.c | 15
-rw-r--r--  arch/x86/um/ldt.c | 5
-rw-r--r--  block/blk-core.c | 2
-rw-r--r--  block/blk-mq-tag.c | 1
-rw-r--r--  block/blk-mq.c | 4
-rw-r--r--  block/blk-sysfs.c | 1
-rw-r--r--  drivers/base/dma-contiguous.c | 2
-rw-r--r--  drivers/block/nbd.c | 36
-rw-r--r--  drivers/block/nvme-core.c | 24
-rw-r--r--  drivers/block/rbd.c | 69
-rw-r--r--  drivers/block/xen-blkfront.c | 3
-rw-r--r--  drivers/bus/arm-ccn.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 3
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 120
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 8
-rw-r--r--  drivers/iio/accel/st_accel_core.c | 6
-rw-r--r--  drivers/iio/adc/twl4030-madc.c | 34
-rw-r--r--  drivers/infiniband/core/cache.c | 2
-rw-r--r--  drivers/infiniband/core/cm.c | 10
-rw-r--r--  drivers/infiniband/core/cma.c | 6
-rw-r--r--  drivers/infiniband/core/roce_gid_mgmt.c | 35
-rw-r--r--  drivers/infiniband/core/ucma.c | 7
-rw-r--r--  drivers/iommu/intel-iommu.c | 12
-rw-r--r--  drivers/md/dm-cache-metadata.c | 2
-rw-r--r--  drivers/md/persistent-data/dm-btree-remove.c | 17
-rw-r--r--  drivers/md/persistent-data/dm-btree.c | 2
-rw-r--r--  drivers/media/dvb-frontends/horus3a.h | 4
-rw-r--r--  drivers/media/dvb-frontends/lnbh25.h | 2
-rw-r--r--  drivers/media/dvb-frontends/m88ds3103.c | 73
-rw-r--r--  drivers/media/dvb-frontends/si2168.c | 4
-rw-r--r--  drivers/media/pci/netup_unidvb/netup_unidvb_spi.c | 12
-rw-r--r--  drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c | 7
-rw-r--r--  drivers/media/rc/ir-hix5hd2.c | 2
-rw-r--r--  drivers/media/tuners/si2157.c | 4
-rw-r--r--  drivers/media/usb/dvb-usb-v2/rtl28xxu.c | 15
-rw-r--r--  drivers/media/usb/dvb-usb-v2/rtl28xxu.h | 2
-rw-r--r--  drivers/media/v4l2-core/Kconfig | 2
-rw-r--r--  drivers/memory/Kconfig | 12
-rw-r--r--  drivers/memory/omap-gpmc.c | 2
-rw-r--r--  drivers/mmc/card/mmc_test.c | 9
-rw-r--r--  drivers/mmc/core/mmc.c | 7
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 32
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 31
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 10
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 28
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 34
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/item.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 2
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 13
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 3
-rw-r--r--  drivers/net/geneve.c | 12
-rw-r--r--  drivers/net/phy/Kconfig | 2
-rw-r--r--  drivers/net/ppp/pppoe.c | 1
-rw-r--r--  drivers/net/usb/Kconfig | 1
-rw-r--r--  drivers/net/usb/asix_common.c | 4
-rw-r--r--  drivers/net/usb/asix_devices.c | 4
-rw-r--r--  drivers/net/vxlan.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 1
-rw-r--r--  drivers/net/wireless/b43/main.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/lib.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/d3.c | 27
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8821ae/hw.c | 17
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8821ae/sw.c | 5
-rw-r--r--  drivers/net/wireless/rtlwifi/wifi.h | 3
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 6
-rw-r--r--  drivers/perf/arm_pmu.c | 10
-rw-r--r--  drivers/staging/iio/accel/sca3000_ring.c | 2
-rw-r--r--  drivers/staging/iio/adc/mxs-lradc.c | 9
-rw-r--r--  drivers/thermal/samsung/exynos_tmu.c | 2
-rw-r--r--  drivers/tty/serial/8250/8250_dma.c | 4
-rw-r--r--  drivers/usb/host/xhci-pci.c | 1
-rw-r--r--  drivers/usb/host/xhci-ring.c | 30
-rw-r--r--  drivers/video/console/fbcon.c | 1
-rw-r--r--  fs/btrfs/file.c | 2
-rw-r--r--  fs/btrfs/ioctl.c | 5
-rw-r--r--  fs/fs-writeback.c | 35
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 3
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 2
-rw-r--r--  include/linux/backing-dev-defs.h | 3
-rw-r--r--  include/linux/backing-dev.h | 69
-rw-r--r--  include/linux/cma.h | 2
-rw-r--r--  include/linux/compiler-gcc.h | 13
-rw-r--r--  include/linux/compiler.h | 66
-rw-r--r--  include/linux/dma-contiguous.h | 4
-rw-r--r--  include/linux/memcontrol.h | 8
-rw-r--r--  include/net/af_unix.h | 2
-rw-r--r--  include/net/inet_timewait_sock.h | 4
-rw-r--r--  include/net/sock.h | 8
-rw-r--r--  include/sound/soc.h | 6
-rw-r--r--  include/sound/wm8904.h | 2
-rw-r--r--  include/uapi/linux/openvswitch.h | 36
-rw-r--r--  include/uapi/linux/rtnetlink.h | 2
-rw-r--r--  kernel/kmod.c | 8
-rw-r--r--  kernel/sched/core.c | 12
-rw-r--r--  kernel/sched/deadline.c | 17
-rw-r--r--  kernel/sched/fair.c | 9
-rw-r--r--  kernel/sched/idle.c | 2
-rw-r--r--  kernel/trace/trace_stack.c | 11
-rw-r--r--  lib/Kconfig.debug | 1
-rw-r--r--  lib/fault-inject.c | 2
-rw-r--r--  mm/backing-dev.c | 36
-rw-r--r--  mm/cma.c | 4
-rw-r--r--  mm/filemap.c | 9
-rw-r--r--  mm/huge_memory.c | 3
-rw-r--r--  mm/memcontrol.c | 35
-rw-r--r--  mm/page-writeback.c | 54
-rw-r--r--  net/bluetooth/hci_conn.c | 99
-rw-r--r--  net/bluetooth/hci_core.c | 7
-rw-r--r--  net/bluetooth/hci_event.c | 11
-rw-r--r--  net/bluetooth/mgmt.c | 24
-rw-r--r--  net/core/ethtool.c | 2
-rw-r--r--  net/core/filter.c | 7
-rw-r--r--  net/dsa/dsa.c | 70
-rw-r--r--  net/ipv4/arp.c | 8
-rw-r--r--  net/ipv4/inet_connection_sock.c | 19
-rw-r--r--  net/ipv6/addrconf.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 3
-rw-r--r--  net/ipv6/route.c | 49
-rw-r--r--  net/ipv6/xfrm6_policy.c | 1
-rw-r--r--  net/mac80211/debugfs.c | 2
-rw-r--r--  net/mac80211/status.c | 1
-rw-r--r--  net/mac80211/tx.c | 7
-rw-r--r--  net/netlink/af_netlink.c | 34
-rw-r--r--  net/openvswitch/actions.c | 19
-rw-r--r--  net/openvswitch/conntrack.c | 89
-rw-r--r--  net/openvswitch/conntrack.h | 14
-rw-r--r--  net/openvswitch/flow.h | 2
-rw-r--r--  net/openvswitch/flow_netlink.c | 30
-rw-r--r--  net/openvswitch/flow_table.c | 3
-rw-r--r--  net/openvswitch/vport.c | 51
-rw-r--r--  net/sched/act_mirred.c | 18
-rw-r--r--  net/sched/sch_hhf.c | 11
-rw-r--r--  net/switchdev/switchdev.c | 3
-rw-r--r--  net/tipc/msg.h | 4
-rw-r--r--  net/tipc/node.c | 6
-rw-r--r--  net/unix/af_unix.c | 12
-rw-r--r--  security/keys/gc.c | 6
-rw-r--r--  security/keys/request_key.c | 3
-rw-r--r--  sound/hda/ext/hdac_ext_bus.c | 1
-rw-r--r--  sound/pci/hda/hda_codec.c | 4
-rw-r--r--  sound/pci/hda/patch_conexant.c | 1
-rw-r--r--  sound/soc/codecs/rt298.c | 26
-rw-r--r--  sound/soc/codecs/wm8962.c | 2
-rw-r--r--  sound/soc/soc-ops.c | 28
-rw-r--r--  tools/power/cpupower/debug/i386/dump_psb.c | 2
-rw-r--r--  tools/power/cpupower/man/cpupower-idle-set.1 | 4
-rw-r--r--  tools/power/cpupower/utils/cpufreq-info.c | 30
-rw-r--r--  tools/power/cpupower/utils/cpufreq-set.c | 10
-rw-r--r--  tools/power/cpupower/utils/cpuidle-info.c | 4
-rw-r--r--  tools/power/cpupower/utils/cpuidle-set.c | 33
-rw-r--r--  tools/power/cpupower/utils/cpupower-info.c | 4
-rw-r--r--  tools/power/cpupower/utils/cpupower-set.c | 2
-rw-r--r--  tools/power/cpupower/utils/helpers/topology.c | 23
-rw-r--r--  tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c | 9
-rw-r--r--  virt/kvm/arm/arch_timer.c | 19
-rw-r--r--  virt/kvm/arm/vgic.c | 95
228 files changed, 1705 insertions, 994 deletions
diff --git a/.mailmap b/.mailmap
index 4b31af54ccd5..b1e9a97653dc 100644
--- a/.mailmap
+++ b/.mailmap
@@ -59,6 +59,7 @@ James Bottomley <jejb@mulgrave.(none)>
59James Bottomley <jejb@titanic.il.steeleye.com> 59James Bottomley <jejb@titanic.il.steeleye.com>
60James E Wilson <wilson@specifix.com> 60James E Wilson <wilson@specifix.com>
61James Ketrenos <jketreno@io.(none)> 61James Ketrenos <jketreno@io.(none)>
62<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
62Jean Tourrilhes <jt@hpl.hp.com> 63Jean Tourrilhes <jt@hpl.hp.com>
63Jeff Garzik <jgarzik@pretzel.yyz.us> 64Jeff Garzik <jgarzik@pretzel.yyz.us>
64Jens Axboe <axboe@suse.de> 65Jens Axboe <axboe@suse.de>
diff --git a/Documentation/arm/OMAP/README b/Documentation/arm/OMAP/README
new file mode 100644
index 000000000000..75645c45d14a
--- /dev/null
+++ b/Documentation/arm/OMAP/README
@@ -0,0 +1,7 @@
1This file contains documentation for running mainline
2kernel on omaps.
3
4KERNEL NEW DEPENDENCIES
5v4.3+ Update is needed for custom .config files to make sure
6 CONFIG_REGULATOR_PBIAS is enabled for MMC1 to work
7 properly.
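In practice, the dependency noted in the new README boils down to making sure a custom .config carries a line like the one below (a minimal sketch; the exact Kconfig dependencies of REGULATOR_PBIAS can vary between kernel versions, so verify against your own tree):
    CONFIG_REGULATOR_PBIAS=y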
diff --git a/MAINTAINERS b/MAINTAINERS
index b8577ad9b8a2..9de185da5f5b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -894,11 +894,12 @@ M: Lennert Buytenhek <kernel@wantstofly.org>
894L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 894L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
895S: Maintained 895S: Maintained
896 896
897ARM/Allwinner A1X SoC support 897ARM/Allwinner sunXi SoC support
898M: Maxime Ripard <maxime.ripard@free-electrons.com> 898M: Maxime Ripard <maxime.ripard@free-electrons.com>
899M: Chen-Yu Tsai <wens@csie.org>
899L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 900L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
900S: Maintained 901S: Maintained
901N: sun[x4567]i 902N: sun[x456789]i
902 903
903ARM/Allwinner SoC Clock Support 904ARM/Allwinner SoC Clock Support
904M: Emilio López <emilio@elopez.com.ar> 905M: Emilio López <emilio@elopez.com.ar>
@@ -6793,7 +6794,6 @@ F: drivers/scsi/megaraid/
6793 6794
6794MELLANOX ETHERNET DRIVER (mlx4_en) 6795MELLANOX ETHERNET DRIVER (mlx4_en)
6795M: Amir Vadai <amirv@mellanox.com> 6796M: Amir Vadai <amirv@mellanox.com>
6796M: Ido Shamay <idos@mellanox.com>
6797L: netdev@vger.kernel.org 6797L: netdev@vger.kernel.org
6798S: Supported 6798S: Supported
6799W: http://www.mellanox.com 6799W: http://www.mellanox.com
@@ -11675,6 +11675,7 @@ F: drivers/tty/serial/zs.*
11675ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR 11675ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR
11676M: Minchan Kim <minchan@kernel.org> 11676M: Minchan Kim <minchan@kernel.org>
11677M: Nitin Gupta <ngupta@vflare.org> 11677M: Nitin Gupta <ngupta@vflare.org>
11678R: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
11678L: linux-mm@kvack.org 11679L: linux-mm@kvack.org
11679S: Maintained 11680S: Maintained
11680F: mm/zsmalloc.c 11681F: mm/zsmalloc.c
diff --git a/Makefile b/Makefile
index d33ab74bffce..431067a41fcf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 3 2PATCHLEVEL = 3
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc6 4EXTRAVERSION = -rc7
5NAME = Blurry Fish Butt 5NAME = Blurry Fish Butt
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 568adf5efde0..d55e3ea89fda 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -402,11 +402,12 @@
402 /* SMPS9 unused */ 402 /* SMPS9 unused */
403 403
404 ldo1_reg: ldo1 { 404 ldo1_reg: ldo1 {
405 /* VDD_SD */ 405 /* VDD_SD / VDDSHV8 */
406 regulator-name = "ldo1"; 406 regulator-name = "ldo1";
407 regulator-min-microvolt = <1800000>; 407 regulator-min-microvolt = <1800000>;
408 regulator-max-microvolt = <3300000>; 408 regulator-max-microvolt = <3300000>;
409 regulator-boot-on; 409 regulator-boot-on;
410 regulator-always-on;
410 }; 411 };
411 412
412 ldo2_reg: ldo2 { 413 ldo2_reg: ldo2 {
diff --git a/arch/arm/boot/dts/armada-385-db-ap.dts b/arch/arm/boot/dts/armada-385-db-ap.dts
index 89f5a95954ed..4047621b137e 100644
--- a/arch/arm/boot/dts/armada-385-db-ap.dts
+++ b/arch/arm/boot/dts/armada-385-db-ap.dts
@@ -46,7 +46,7 @@
46 46
47/ { 47/ {
48 model = "Marvell Armada 385 Access Point Development Board"; 48 model = "Marvell Armada 385 Access Point Development Board";
49 compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada38x"; 49 compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada380";
50 50
51 chosen { 51 chosen {
52 stdout-path = "serial1:115200n8"; 52 stdout-path = "serial1:115200n8";
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index 63a48490e2f9..d4dbd28d348c 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -152,7 +152,7 @@
152 }; 152 };
153 153
154 usb_phy2: phy@a2f400 { 154 usb_phy2: phy@a2f400 {
155 compatible = "marvell,berlin2-usb-phy"; 155 compatible = "marvell,berlin2cd-usb-phy";
156 reg = <0xa2f400 0x128>; 156 reg = <0xa2f400 0x128>;
157 #phy-cells = <0>; 157 #phy-cells = <0>;
158 resets = <&chip_rst 0x104 14>; 158 resets = <&chip_rst 0x104 14>;
@@ -170,7 +170,7 @@
170 }; 170 };
171 171
172 usb_phy0: phy@b74000 { 172 usb_phy0: phy@b74000 {
173 compatible = "marvell,berlin2-usb-phy"; 173 compatible = "marvell,berlin2cd-usb-phy";
174 reg = <0xb74000 0x128>; 174 reg = <0xb74000 0x128>;
175 #phy-cells = <0>; 175 #phy-cells = <0>;
176 resets = <&chip_rst 0x104 12>; 176 resets = <&chip_rst 0x104 12>;
@@ -178,7 +178,7 @@
178 }; 178 };
179 179
180 usb_phy1: phy@b78000 { 180 usb_phy1: phy@b78000 {
181 compatible = "marvell,berlin2-usb-phy"; 181 compatible = "marvell,berlin2cd-usb-phy";
182 reg = <0xb78000 0x128>; 182 reg = <0xb78000 0x128>;
183 #phy-cells = <0>; 183 #phy-cells = <0>;
184 resets = <&chip_rst 0x104 13>; 184 resets = <&chip_rst 0x104 13>;
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
index 8f4d76c5e11c..1b95da79293c 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
@@ -915,6 +915,11 @@
915 }; 915 };
916}; 916};
917 917
918&pmu_system_controller {
919 assigned-clocks = <&pmu_system_controller 0>;
920 assigned-clock-parents = <&clock CLK_FIN_PLL>;
921};
922
918&rtc { 923&rtc {
919 status = "okay"; 924 status = "okay";
920 clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>; 925 clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>;
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index 7d5b386b5ae6..8f40c7e549bd 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -878,6 +878,11 @@
878 }; 878 };
879}; 879};
880 880
881&pmu_system_controller {
882 assigned-clocks = <&pmu_system_controller 0>;
883 assigned-clock-parents = <&clock CLK_FIN_PLL>;
884};
885
881&rtc { 886&rtc {
882 status = "okay"; 887 status = "okay";
883 clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>; 888 clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>;
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index b738ce0f9d9b..6e444bb873f9 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -588,10 +588,10 @@
588 status = "disabled"; 588 status = "disabled";
589 }; 589 };
590 590
591 uart2: serial@30870000 { 591 uart2: serial@30890000 {
592 compatible = "fsl,imx7d-uart", 592 compatible = "fsl,imx7d-uart",
593 "fsl,imx6q-uart"; 593 "fsl,imx6q-uart";
594 reg = <0x30870000 0x10000>; 594 reg = <0x30890000 0x10000>;
595 interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>; 595 interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
596 clocks = <&clks IMX7D_UART2_ROOT_CLK>, 596 clocks = <&clks IMX7D_UART2_ROOT_CLK>,
597 <&clks IMX7D_UART2_ROOT_CLK>; 597 <&clks IMX7D_UART2_ROOT_CLK>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
index 91146c318798..5b0430041ec6 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
@@ -12,7 +12,7 @@
12 12
13/ { 13/ {
14 model = "LogicPD Zoom DM3730 Torpedo Development Kit"; 14 model = "LogicPD Zoom DM3730 Torpedo Development Kit";
15 compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap36xx"; 15 compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3630", "ti,omap3";
16 16
17 gpio_keys { 17 gpio_keys {
18 compatible = "gpio-keys"; 18 compatible = "gpio-keys";
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 548441384d2a..8c77c87660cd 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -67,7 +67,7 @@
67 67
68 timer@c1109940 { 68 timer@c1109940 {
69 compatible = "amlogic,meson6-timer"; 69 compatible = "amlogic,meson6-timer";
70 reg = <0xc1109940 0x14>; 70 reg = <0xc1109940 0x18>;
71 interrupts = <0 10 1>; 71 interrupts = <0 10 1>;
72 }; 72 };
73 73
@@ -80,36 +80,37 @@
80 wdt: watchdog@c1109900 { 80 wdt: watchdog@c1109900 {
81 compatible = "amlogic,meson6-wdt"; 81 compatible = "amlogic,meson6-wdt";
82 reg = <0xc1109900 0x8>; 82 reg = <0xc1109900 0x8>;
83 interrupts = <0 0 1>;
83 }; 84 };
84 85
85 uart_AO: serial@c81004c0 { 86 uart_AO: serial@c81004c0 {
86 compatible = "amlogic,meson-uart"; 87 compatible = "amlogic,meson-uart";
87 reg = <0xc81004c0 0x14>; 88 reg = <0xc81004c0 0x18>;
88 interrupts = <0 90 1>; 89 interrupts = <0 90 1>;
89 clocks = <&clk81>; 90 clocks = <&clk81>;
90 status = "disabled"; 91 status = "disabled";
91 }; 92 };
92 93
93 uart_A: serial@c81084c0 { 94 uart_A: serial@c11084c0 {
94 compatible = "amlogic,meson-uart"; 95 compatible = "amlogic,meson-uart";
95 reg = <0xc81084c0 0x14>; 96 reg = <0xc11084c0 0x18>;
96 interrupts = <0 90 1>; 97 interrupts = <0 26 1>;
97 clocks = <&clk81>; 98 clocks = <&clk81>;
98 status = "disabled"; 99 status = "disabled";
99 }; 100 };
100 101
101 uart_B: serial@c81084dc { 102 uart_B: serial@c11084dc {
102 compatible = "amlogic,meson-uart"; 103 compatible = "amlogic,meson-uart";
103 reg = <0xc81084dc 0x14>; 104 reg = <0xc11084dc 0x18>;
104 interrupts = <0 90 1>; 105 interrupts = <0 75 1>;
105 clocks = <&clk81>; 106 clocks = <&clk81>;
106 status = "disabled"; 107 status = "disabled";
107 }; 108 };
108 109
109 uart_C: serial@c8108700 { 110 uart_C: serial@c1108700 {
110 compatible = "amlogic,meson-uart"; 111 compatible = "amlogic,meson-uart";
111 reg = <0xc8108700 0x14>; 112 reg = <0xc1108700 0x18>;
112 interrupts = <0 90 1>; 113 interrupts = <0 93 1>;
113 clocks = <&clk81>; 114 clocks = <&clk81>;
114 status = "disabled"; 115 status = "disabled";
115 }; 116 };
diff --git a/arch/arm/boot/dts/omap3-evm-37xx.dts b/arch/arm/boot/dts/omap3-evm-37xx.dts
index 16e8ce350dda..bb339d1648e0 100644
--- a/arch/arm/boot/dts/omap3-evm-37xx.dts
+++ b/arch/arm/boot/dts/omap3-evm-37xx.dts
@@ -13,7 +13,7 @@
13 13
14/ { 14/ {
15 model = "TI OMAP37XX EVM (TMDSEVM3730)"; 15 model = "TI OMAP37XX EVM (TMDSEVM3730)";
16 compatible = "ti,omap3-evm-37xx", "ti,omap36xx"; 16 compatible = "ti,omap3-evm-37xx", "ti,omap3630", "ti,omap3";
17 17
18 memory { 18 memory {
19 device_type = "memory"; 19 device_type = "memory";
diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
index 810cda743b6d..9c2387b34d0c 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus.dtsi
+++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
@@ -56,7 +56,7 @@
56 /* VMMCI level-shifter enable */ 56 /* VMMCI level-shifter enable */
57 default_hrefv60_cfg2 { 57 default_hrefv60_cfg2 {
58 pins = "GPIO169_D22"; 58 pins = "GPIO169_D22";
59 ste,config = <&gpio_out_lo>; 59 ste,config = <&gpio_out_hi>;
60 }; 60 };
61 /* VMMCI level-shifter voltage select */ 61 /* VMMCI level-shifter voltage select */
62 default_hrefv60_cfg3 { 62 default_hrefv60_cfg3 {
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index 9d4f86e9c50a..d845bd1448b5 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -234,7 +234,9 @@
234 gpio-controller; 234 gpio-controller;
235 #interrupt-cells = <2>; 235 #interrupt-cells = <2>;
236 interrupt-controller; 236 interrupt-controller;
237 /*
237 gpio-ranges = <&pinmux 0 0 246>; 238 gpio-ranges = <&pinmux 0 0 246>;
239 */
238 }; 240 };
239 241
240 apbmisc@70000800 { 242 apbmisc@70000800 {
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index 1e204a6de12c..819e2ae2cabe 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -258,7 +258,9 @@
258 gpio-controller; 258 gpio-controller;
259 #interrupt-cells = <2>; 259 #interrupt-cells = <2>;
260 interrupt-controller; 260 interrupt-controller;
261 /*
261 gpio-ranges = <&pinmux 0 0 251>; 262 gpio-ranges = <&pinmux 0 0 251>;
263 */
262 }; 264 };
263 265
264 apbdma: dma@0,60020000 { 266 apbdma: dma@0,60020000 {
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index e058709e6d98..969b828505ae 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -244,7 +244,9 @@
244 gpio-controller; 244 gpio-controller;
245 #interrupt-cells = <2>; 245 #interrupt-cells = <2>;
246 interrupt-controller; 246 interrupt-controller;
247 /*
247 gpio-ranges = <&pinmux 0 0 224>; 248 gpio-ranges = <&pinmux 0 0 224>;
249 */
248 }; 250 };
249 251
250 apbmisc@70000800 { 252 apbmisc@70000800 {
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index fe04fb5e155f..c6938ad1b543 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -349,7 +349,9 @@
349 gpio-controller; 349 gpio-controller;
350 #interrupt-cells = <2>; 350 #interrupt-cells = <2>;
351 interrupt-controller; 351 interrupt-controller;
352 /*
352 gpio-ranges = <&pinmux 0 0 248>; 353 gpio-ranges = <&pinmux 0 0 248>;
354 */
353 }; 355 };
354 356
355 apbmisc@70000800 { 357 apbmisc@70000800 {
diff --git a/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts b/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts
index 33963acd7e8f..f80f772d99fb 100644
--- a/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts
+++ b/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts
@@ -85,7 +85,7 @@
85}; 85};
86 86
87&ethsc { 87&ethsc {
88 interrupts = <0 50 4>; 88 interrupts = <0 52 4>;
89}; 89};
90 90
91&serial0 { 91&serial0 {
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 210eccadb69a..356970f3b25e 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
21 depends on MMU && OF 21 depends on MMU && OF
22 select PREEMPT_NOTIFIERS 22 select PREEMPT_NOTIFIERS
23 select ANON_INODES 23 select ANON_INODES
24 select ARM_GIC
24 select HAVE_KVM_CPU_RELAX_INTERCEPT 25 select HAVE_KVM_CPU_RELAX_INTERCEPT
25 select HAVE_KVM_ARCH_TLB_FLUSH_ALL 26 select HAVE_KVM_ARCH_TLB_FLUSH_ALL
26 select KVM_MMIO 27 select KVM_MMIO
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index dc017adfddc8..78b286994577 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1080,7 +1080,7 @@ static int init_hyp_mode(void)
1080 */ 1080 */
1081 err = kvm_timer_hyp_init(); 1081 err = kvm_timer_hyp_init();
1082 if (err) 1082 if (err)
1083 goto out_free_mappings; 1083 goto out_free_context;
1084 1084
1085#ifndef CONFIG_HOTPLUG_CPU 1085#ifndef CONFIG_HOTPLUG_CPU
1086 free_boot_hyp_pgd(); 1086 free_boot_hyp_pgd();
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 4a87e86dec45..7c21760f590f 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -200,15 +200,15 @@ no_clk:
200 args.args_count = 0; 200 args.args_count = 0;
201 child_domain = of_genpd_get_from_provider(&args); 201 child_domain = of_genpd_get_from_provider(&args);
202 if (IS_ERR(child_domain)) 202 if (IS_ERR(child_domain))
203 goto next_pd; 203 continue;
204 204
205 if (of_parse_phandle_with_args(np, "power-domains", 205 if (of_parse_phandle_with_args(np, "power-domains",
206 "#power-domain-cells", 0, &args) != 0) 206 "#power-domain-cells", 0, &args) != 0)
207 goto next_pd; 207 continue;
208 208
209 parent_domain = of_genpd_get_from_provider(&args); 209 parent_domain = of_genpd_get_from_provider(&args);
210 if (IS_ERR(parent_domain)) 210 if (IS_ERR(parent_domain))
211 goto next_pd; 211 continue;
212 212
213 if (pm_genpd_add_subdomain(parent_domain, child_domain)) 213 if (pm_genpd_add_subdomain(parent_domain, child_domain))
214 pr_warn("%s failed to add subdomain: %s\n", 214 pr_warn("%s failed to add subdomain: %s\n",
@@ -216,8 +216,6 @@ no_clk:
216 else 216 else
217 pr_info("%s has as child subdomain: %s.\n", 217 pr_info("%s has as child subdomain: %s.\n",
218 parent_domain->name, child_domain->name); 218 parent_domain->name, child_domain->name);
219next_pd:
220 of_node_put(np);
221 } 219 }
222 220
223 return 0; 221 return 0;
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index b3a0dff67e3f..33d1460a5639 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -49,6 +49,7 @@ config SOC_OMAP5
49 select OMAP_INTERCONNECT 49 select OMAP_INTERCONNECT
50 select OMAP_INTERCONNECT_BARRIER 50 select OMAP_INTERCONNECT_BARRIER
51 select PM_OPP if PM 51 select PM_OPP if PM
52 select ZONE_DMA if ARM_LPAE
52 53
53config SOC_AM33XX 54config SOC_AM33XX
54 bool "TI AM33XX" 55 bool "TI AM33XX"
@@ -78,6 +79,7 @@ config SOC_DRA7XX
78 select OMAP_INTERCONNECT 79 select OMAP_INTERCONNECT
79 select OMAP_INTERCONNECT_BARRIER 80 select OMAP_INTERCONNECT_BARRIER
80 select PM_OPP if PM 81 select PM_OPP if PM
82 select ZONE_DMA if ARM_LPAE
81 83
82config ARCH_OMAP2PLUS 84config ARCH_OMAP2PLUS
83 bool 85 bool
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 6133eaac685d..fb219a30c10c 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -106,6 +106,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
106MACHINE_END 106MACHINE_END
107 107
108static const char *const omap36xx_boards_compat[] __initconst = { 108static const char *const omap36xx_boards_compat[] __initconst = {
109 "ti,omap3630",
109 "ti,omap36xx", 110 "ti,omap36xx",
110 NULL, 111 NULL,
111}; 112};
@@ -243,6 +244,9 @@ static const char *const omap5_boards_compat[] __initconst = {
243}; 244};
244 245
245DT_MACHINE_START(OMAP5_DT, "Generic OMAP5 (Flattened Device Tree)") 246DT_MACHINE_START(OMAP5_DT, "Generic OMAP5 (Flattened Device Tree)")
247#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
248 .dma_zone_size = SZ_2G,
249#endif
246 .reserve = omap_reserve, 250 .reserve = omap_reserve,
247 .smp = smp_ops(omap4_smp_ops), 251 .smp = smp_ops(omap4_smp_ops),
248 .map_io = omap5_map_io, 252 .map_io = omap5_map_io,
@@ -288,6 +292,9 @@ static const char *const dra74x_boards_compat[] __initconst = {
288}; 292};
289 293
290DT_MACHINE_START(DRA74X_DT, "Generic DRA74X (Flattened Device Tree)") 294DT_MACHINE_START(DRA74X_DT, "Generic DRA74X (Flattened Device Tree)")
295#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
296 .dma_zone_size = SZ_2G,
297#endif
291 .reserve = omap_reserve, 298 .reserve = omap_reserve,
292 .smp = smp_ops(omap4_smp_ops), 299 .smp = smp_ops(omap4_smp_ops),
293 .map_io = dra7xx_map_io, 300 .map_io = dra7xx_map_io,
@@ -308,6 +315,9 @@ static const char *const dra72x_boards_compat[] __initconst = {
308}; 315};
309 316
310DT_MACHINE_START(DRA72X_DT, "Generic DRA72X (Flattened Device Tree)") 317DT_MACHINE_START(DRA72X_DT, "Generic DRA72X (Flattened Device Tree)")
318#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
319 .dma_zone_size = SZ_2G,
320#endif
311 .reserve = omap_reserve, 321 .reserve = omap_reserve,
312 .map_io = dra7xx_map_io, 322 .map_io = dra7xx_map_io,
313 .init_early = dra7xx_init_early, 323 .init_early = dra7xx_init_early,
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index ea56397599c2..1dfe34654c43 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -559,7 +559,14 @@ static void pdata_quirks_check(struct pdata_init *quirks)
559 559
560void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table) 560void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table)
561{ 561{
562 omap_sdrc_init(NULL, NULL); 562 /*
563 * We still need this for omap2420 and omap3 PM to work, others are
564 * using drivers/misc/sram.c already.
565 */
566 if (of_machine_is_compatible("ti,omap2420") ||
567 of_machine_is_compatible("ti,omap3"))
568 omap_sdrc_init(NULL, NULL);
569
563 pdata_quirks_check(auxdata_quirks); 570 pdata_quirks_check(auxdata_quirks);
564 of_platform_populate(NULL, omap_dt_match_table, 571 of_platform_populate(NULL, omap_dt_match_table,
565 omap_auxdata_lookup, NULL); 572 omap_auxdata_lookup, NULL);
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 06005d3f2ba3..20ce2d386f17 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -42,10 +42,6 @@
42#define PECR_IS(n) ((1 << ((n) * 2)) << 29) 42#define PECR_IS(n) ((1 << ((n) * 2)) << 29)
43 43
44extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)); 44extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
45#ifdef CONFIG_PM
46
47#define ISRAM_START 0x5c000000
48#define ISRAM_SIZE SZ_256K
49 45
50/* 46/*
51 * NAND NFC: DFI bus arbitration subset 47 * NAND NFC: DFI bus arbitration subset
@@ -54,6 +50,11 @@ extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
54#define NDCR_ND_ARB_EN (1 << 12) 50#define NDCR_ND_ARB_EN (1 << 12)
55#define NDCR_ND_ARB_CNTL (1 << 19) 51#define NDCR_ND_ARB_CNTL (1 << 19)
56 52
53#ifdef CONFIG_PM
54
55#define ISRAM_START 0x5c000000
56#define ISRAM_SIZE SZ_256K
57
57static void __iomem *sram; 58static void __iomem *sram;
58static unsigned long wakeup_src; 59static unsigned long wakeup_src;
59 60
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 876060bcceeb..b8efb8cd1f73 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -614,6 +614,7 @@ load_common:
614 case BPF_LD | BPF_B | BPF_IND: 614 case BPF_LD | BPF_B | BPF_IND:
615 load_order = 0; 615 load_order = 0;
616load_ind: 616load_ind:
617 update_on_xread(ctx);
617 OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); 618 OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
618 goto load_common; 619 goto load_common;
619 case BPF_LDX | BPF_IMM: 620 case BPF_LDX | BPF_IMM:
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 2235081a04ee..8861c367d061 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -495,7 +495,7 @@ void __init orion_ge00_switch_init(struct dsa_platform_data *d, int irq)
495 495
496 d->netdev = &orion_ge00.dev; 496 d->netdev = &orion_ge00.dev;
497 for (i = 0; i < d->nr_chips; i++) 497 for (i = 0; i < d->nr_chips; i++)
498 d->chip[i].host_dev = &orion_ge00_shared.dev; 498 d->chip[i].host_dev = &orion_ge_mvmdio.dev;
499 orion_switch_device.dev.platform_data = d; 499 orion_switch_device.dev.platform_data = d;
500 500
501 platform_device_register(&orion_switch_device); 501 platform_device_register(&orion_switch_device);
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 0dc42c5082b7..5f8229e24fe6 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -3,7 +3,6 @@
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6#include <asm/reg.h>
7 6
8/* bytes per L1 cache line */ 7/* bytes per L1 cache line */
9#if defined(CONFIG_8xx) || defined(CONFIG_403GCX) 8#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -40,12 +39,6 @@ struct ppc64_caches {
40}; 39};
41 40
42extern struct ppc64_caches ppc64_caches; 41extern struct ppc64_caches ppc64_caches;
43
44static inline void logmpp(u64 x)
45{
46 asm volatile(PPC_LOGMPP(R1) : : "r" (x));
47}
48
49#endif /* __powerpc64__ && ! __ASSEMBLY__ */ 42#endif /* __powerpc64__ && ! __ASSEMBLY__ */
50 43
51#if defined(__ASSEMBLY__) 44#if defined(__ASSEMBLY__)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 827a38d7a9db..887c259556df 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -297,8 +297,6 @@ struct kvmppc_vcore {
297 u32 arch_compat; 297 u32 arch_compat;
298 ulong pcr; 298 ulong pcr;
299 ulong dpdes; /* doorbell state (POWER8) */ 299 ulong dpdes; /* doorbell state (POWER8) */
300 void *mpp_buffer; /* Micro Partition Prefetch buffer */
301 bool mpp_buffer_is_valid;
302 ulong conferring_threads; 300 ulong conferring_threads;
303}; 301};
304 302
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 790f5d1d9a46..7ab04fc59e24 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -141,7 +141,6 @@
141#define PPC_INST_ISEL 0x7c00001e 141#define PPC_INST_ISEL 0x7c00001e
142#define PPC_INST_ISEL_MASK 0xfc00003e 142#define PPC_INST_ISEL_MASK 0xfc00003e
143#define PPC_INST_LDARX 0x7c0000a8 143#define PPC_INST_LDARX 0x7c0000a8
144#define PPC_INST_LOGMPP 0x7c0007e4
145#define PPC_INST_LSWI 0x7c0004aa 144#define PPC_INST_LSWI 0x7c0004aa
146#define PPC_INST_LSWX 0x7c00042a 145#define PPC_INST_LSWX 0x7c00042a
147#define PPC_INST_LWARX 0x7c000028 146#define PPC_INST_LWARX 0x7c000028
@@ -285,20 +284,6 @@
285#define __PPC_EH(eh) 0 284#define __PPC_EH(eh) 0
286#endif 285#endif
287 286
288/* POWER8 Micro Partition Prefetch (MPP) parameters */
289/* Address mask is common for LOGMPP instruction and MPPR SPR */
290#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000ULL
291
292/* Bits 60 and 61 of MPP SPR should be set to one of the following */
293/* Aborting the fetch is indeed setting 00 in the table size bits */
294#define PPC_MPPR_FETCH_ABORT (0x0ULL << 60)
295#define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60)
296
297/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
298#define PPC_LOGMPP_LOG_L2 (0x02ULL << 54)
299#define PPC_LOGMPP_LOG_L2L3 (0x01ULL << 54)
300#define PPC_LOGMPP_LOG_ABORT (0x03ULL << 54)
301
302/* Deal with instructions that older assemblers aren't aware of */ 287/* Deal with instructions that older assemblers aren't aware of */
303#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ 288#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \
304 __PPC_RA(a) | __PPC_RB(b)) 289 __PPC_RA(a) | __PPC_RB(b))
@@ -307,8 +292,6 @@
307#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \ 292#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
308 ___PPC_RT(t) | ___PPC_RA(a) | \ 293 ___PPC_RT(t) | ___PPC_RA(a) | \
309 ___PPC_RB(b) | __PPC_EH(eh)) 294 ___PPC_RB(b) | __PPC_EH(eh))
310#define PPC_LOGMPP(b) stringify_in_c(.long PPC_INST_LOGMPP | \
311 __PPC_RB(b))
312#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \ 295#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \
313 ___PPC_RT(t) | ___PPC_RA(a) | \ 296 ___PPC_RT(t) | ___PPC_RA(a) | \
314 ___PPC_RB(b) | __PPC_EH(eh)) 297 ___PPC_RB(b) | __PPC_EH(eh))
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index aa1cc5f015ee..a908ada8e0a5 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -226,7 +226,6 @@
226#define CTRL_TE 0x00c00000 /* thread enable */ 226#define CTRL_TE 0x00c00000 /* thread enable */
227#define CTRL_RUNLATCH 0x1 227#define CTRL_RUNLATCH 0x1
228#define SPRN_DAWR 0xB4 228#define SPRN_DAWR 0xB4
229#define SPRN_MPPR 0xB8 /* Micro Partition Prefetch Register */
230#define SPRN_RPR 0xBA /* Relative Priority Register */ 229#define SPRN_RPR 0xBA /* Relative Priority Register */
231#define SPRN_CIABR 0xBB 230#define SPRN_CIABR 0xBB
232#define CIABR_PRIV 0x3 231#define CIABR_PRIV 0x3
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 84bf934cf748..5a753fae8265 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1043,6 +1043,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
1043 if (!capable(CAP_SYS_ADMIN)) 1043 if (!capable(CAP_SYS_ADMIN))
1044 return -EPERM; 1044 return -EPERM;
1045 1045
1046 if (!rtas.entry)
1047 return -EINVAL;
1048
1046 if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0) 1049 if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
1047 return -EFAULT; 1050 return -EFAULT;
1048 1051
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 228049786888..9c26c5a96ea2 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -36,7 +36,6 @@
36 36
37#include <asm/reg.h> 37#include <asm/reg.h>
38#include <asm/cputable.h> 38#include <asm/cputable.h>
39#include <asm/cache.h>
40#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
41#include <asm/tlbflush.h> 40#include <asm/tlbflush.h>
42#include <asm/uaccess.h> 41#include <asm/uaccess.h>
@@ -75,12 +74,6 @@
75 74
76static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1); 75static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
77 76
78#if defined(CONFIG_PPC_64K_PAGES)
79#define MPP_BUFFER_ORDER 0
80#elif defined(CONFIG_PPC_4K_PAGES)
81#define MPP_BUFFER_ORDER 3
82#endif
83
84static int dynamic_mt_modes = 6; 77static int dynamic_mt_modes = 6;
85module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR); 78module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
86MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)"); 79MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
@@ -1455,13 +1448,6 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
1455 vcore->kvm = kvm; 1448 vcore->kvm = kvm;
1456 INIT_LIST_HEAD(&vcore->preempt_list); 1449 INIT_LIST_HEAD(&vcore->preempt_list);
1457 1450
1458 vcore->mpp_buffer_is_valid = false;
1459
1460 if (cpu_has_feature(CPU_FTR_ARCH_207S))
1461 vcore->mpp_buffer = (void *)__get_free_pages(
1462 GFP_KERNEL|__GFP_ZERO,
1463 MPP_BUFFER_ORDER);
1464
1465 return vcore; 1451 return vcore;
1466} 1452}
1467 1453
@@ -1894,33 +1880,6 @@ static int on_primary_thread(void)
1894 return 1; 1880 return 1;
1895} 1881}
1896 1882
1897static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
1898{
1899 phys_addr_t phy_addr, mpp_addr;
1900
1901 phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
1902 mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
1903
1904 mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
1905 logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
1906
1907 vc->mpp_buffer_is_valid = true;
1908}
1909
1910static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
1911{
1912 phys_addr_t phy_addr, mpp_addr;
1913
1914 phy_addr = virt_to_phys(vc->mpp_buffer);
1915 mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
1916
1917 /* We must abort any in-progress save operations to ensure
1918 * the table is valid so that prefetch engine knows when to
1919 * stop prefetching. */
1920 logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
1921 mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
1922}
1923
1924/* 1883/*
1925 * A list of virtual cores for each physical CPU. 1884 * A list of virtual cores for each physical CPU.
1926 * These are vcores that could run but their runner VCPU tasks are 1885 * These are vcores that could run but their runner VCPU tasks are
@@ -2471,14 +2430,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
2471 2430
2472 srcu_idx = srcu_read_lock(&vc->kvm->srcu); 2431 srcu_idx = srcu_read_lock(&vc->kvm->srcu);
2473 2432
2474 if (vc->mpp_buffer_is_valid)
2475 kvmppc_start_restoring_l2_cache(vc);
2476
2477 __kvmppc_vcore_entry(); 2433 __kvmppc_vcore_entry();
2478 2434
2479 if (vc->mpp_buffer)
2480 kvmppc_start_saving_l2_cache(vc);
2481
2482 srcu_read_unlock(&vc->kvm->srcu, srcu_idx); 2435 srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
2483 2436
2484 spin_lock(&vc->lock); 2437 spin_lock(&vc->lock);
@@ -3073,14 +3026,8 @@ static void kvmppc_free_vcores(struct kvm *kvm)
3073{ 3026{
3074 long int i; 3027 long int i;
3075 3028
3076 for (i = 0; i < KVM_MAX_VCORES; ++i) { 3029 for (i = 0; i < KVM_MAX_VCORES; ++i)
3077 if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
3078 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
3079 free_pages((unsigned long)vc->mpp_buffer,
3080 MPP_BUFFER_ORDER);
3081 }
3082 kfree(kvm->arch.vcores[i]); 3030 kfree(kvm->arch.vcores[i]);
3083 }
3084 kvm->arch.online_vcores = 0; 3031 kvm->arch.online_vcores = 0;
3085} 3032}
3086 3033
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 8f70ba681a78..ca264833ee64 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -171,7 +171,26 @@ static void pnv_smp_cpu_kill_self(void)
171 * so clear LPCR:PECE1. We keep PECE2 enabled. 171 * so clear LPCR:PECE1. We keep PECE2 enabled.
172 */ 172 */
173 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); 173 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
174
175 /*
176 * Hard-disable interrupts, and then clear irq_happened flags
177 * that we can safely ignore while off-line, since they
178 * are for things for which we do no processing when off-line
179 * (or in the case of HMI, all the processing we need to do
180 * is done in lower-level real-mode code).
181 */
182 hard_irq_disable();
183 local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);
184
174 while (!generic_check_cpu_restart(cpu)) { 185 while (!generic_check_cpu_restart(cpu)) {
186 /*
187 * Clear IPI flag, since we don't handle IPIs while
188 * offline, except for those when changing micro-threading
189 * mode, which are handled explicitly below, and those
190 * for coming online, which are handled via
191 * generic_check_cpu_restart() calls.
192 */
193 kvmppc_set_host_ipi(cpu, 0);
175 194
176 ppc64_runlatch_off(); 195 ppc64_runlatch_off();
177 196
@@ -196,20 +215,20 @@ static void pnv_smp_cpu_kill_self(void)
196 * having finished executing in a KVM guest, then srr1 215 * having finished executing in a KVM guest, then srr1
197 * contains 0. 216 * contains 0.
198 */ 217 */
199 if ((srr1 & wmask) == SRR1_WAKEEE) { 218 if (((srr1 & wmask) == SRR1_WAKEEE) ||
219 (local_paca->irq_happened & PACA_IRQ_EE)) {
200 icp_native_flush_interrupt(); 220 icp_native_flush_interrupt();
201 local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
202 smp_mb();
203 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { 221 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
204 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); 222 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
205 asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); 223 asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
206 kvmppc_set_host_ipi(cpu, 0);
207 } 224 }
225 local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);
226 smp_mb();
208 227
209 if (cpu_core_split_required()) 228 if (cpu_core_split_required())
210 continue; 229 continue;
211 230
212 if (!generic_check_cpu_restart(cpu)) 231 if (srr1 && !generic_check_cpu_restart(cpu))
213 DBG("CPU%d Unexpected exit while offline !\n", cpu); 232 DBG("CPU%d Unexpected exit while offline !\n", cpu);
214 } 233 }
215 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1); 234 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 098ab3333e7c..e3abe6f3156d 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -70,8 +70,8 @@ KBUILD_AFLAGS += $(ARCH_INCLUDE)
70 70
71USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \ 71USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \
72 $(ARCH_INCLUDE) $(MODE_INCLUDE) $(filter -I%,$(CFLAGS)) \ 72 $(ARCH_INCLUDE) $(MODE_INCLUDE) $(filter -I%,$(CFLAGS)) \
73 -D_FILE_OFFSET_BITS=64 -idirafter include \ 73 -D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \
74 -D__KERNEL__ -D__UM_HOST__ 74 -idirafter $(obj)/include -D__KERNEL__ -D__UM_HOST__
75 75
76#This will adjust *FLAGS accordingly to the platform. 76#This will adjust *FLAGS accordingly to the platform.
77include $(ARCH_DIR)/Makefile-os-$(OS) 77include $(ARCH_DIR)/Makefile-os-$(OS)
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index d8a9fce6ee2e..98783dd0fa2e 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -220,7 +220,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
220 show_regs(container_of(regs, struct pt_regs, regs)); 220 show_regs(container_of(regs, struct pt_regs, regs));
221 panic("Segfault with no mm"); 221 panic("Segfault with no mm");
222 } 222 }
223 else if (!is_user && address < TASK_SIZE) { 223 else if (!is_user && address > PAGE_SIZE && address < TASK_SIZE) {
224 show_regs(container_of(regs, struct pt_regs, regs)); 224 show_regs(container_of(regs, struct pt_regs, regs));
225 panic("Kernel tried to access user memory at addr 0x%lx, ip 0x%lx", 225 panic("Kernel tried to access user memory at addr 0x%lx, ip 0x%lx",
226 address, ip); 226 address, ip);
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index e3ee4a51ef63..3f02d4232812 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -96,7 +96,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
96 "ret = %d\n", -n); 96 "ret = %d\n", -n);
97 ret = n; 97 ret = n;
98 } 98 }
99 CATCH_EINTR(waitpid(pid, NULL, __WCLONE)); 99 CATCH_EINTR(waitpid(pid, NULL, __WALL));
100 } 100 }
101 101
102out_free2: 102out_free2:
@@ -129,7 +129,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
129 return err; 129 return err;
130 } 130 }
131 if (stack_out == NULL) { 131 if (stack_out == NULL) {
132 CATCH_EINTR(pid = waitpid(pid, &status, __WCLONE)); 132 CATCH_EINTR(pid = waitpid(pid, &status, __WALL));
133 if (pid < 0) { 133 if (pid < 0) {
134 err = -errno; 134 err = -errno;
135 printk(UM_KERN_ERR "run_helper_thread - wait failed, " 135 printk(UM_KERN_ERR "run_helper_thread - wait failed, "
@@ -148,7 +148,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
148int helper_wait(int pid) 148int helper_wait(int pid)
149{ 149{
150 int ret, status; 150 int ret, status;
151 int wflags = __WCLONE; 151 int wflags = __WALL;
152 152
153 CATCH_EINTR(ret = waitpid(pid, &status, wflags)); 153 CATCH_EINTR(ret = waitpid(pid, &status, wflags));
154 if (ret < 0) { 154 if (ret < 0) {
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index ee1b6d346b98..db51c1f27446 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -667,6 +667,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
667 bool conout_found = false; 667 bool conout_found = false;
668 void *dummy = NULL; 668 void *dummy = NULL;
669 u32 h = handles[i]; 669 u32 h = handles[i];
670 u32 current_fb_base;
670 671
671 status = efi_call_early(handle_protocol, h, 672 status = efi_call_early(handle_protocol, h,
672 proto, (void **)&gop32); 673 proto, (void **)&gop32);
@@ -678,7 +679,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
678 if (status == EFI_SUCCESS) 679 if (status == EFI_SUCCESS)
679 conout_found = true; 680 conout_found = true;
680 681
681 status = __gop_query32(gop32, &info, &size, &fb_base); 682 status = __gop_query32(gop32, &info, &size, &current_fb_base);
682 if (status == EFI_SUCCESS && (!first_gop || conout_found)) { 683 if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
683 /* 684 /*
684 * Systems that use the UEFI Console Splitter may 685 * Systems that use the UEFI Console Splitter may
@@ -692,6 +693,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
692 pixel_format = info->pixel_format; 693 pixel_format = info->pixel_format;
693 pixel_info = info->pixel_information; 694 pixel_info = info->pixel_information;
694 pixels_per_scan_line = info->pixels_per_scan_line; 695 pixels_per_scan_line = info->pixels_per_scan_line;
696 fb_base = current_fb_base;
695 697
696 /* 698 /*
697 * Once we've found a GOP supporting ConOut, 699 * Once we've found a GOP supporting ConOut,
@@ -770,6 +772,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
770 bool conout_found = false; 772 bool conout_found = false;
771 void *dummy = NULL; 773 void *dummy = NULL;
772 u64 h = handles[i]; 774 u64 h = handles[i];
775 u32 current_fb_base;
773 776
774 status = efi_call_early(handle_protocol, h, 777 status = efi_call_early(handle_protocol, h,
775 proto, (void **)&gop64); 778 proto, (void **)&gop64);
@@ -781,7 +784,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
781 if (status == EFI_SUCCESS) 784 if (status == EFI_SUCCESS)
782 conout_found = true; 785 conout_found = true;
783 786
784 status = __gop_query64(gop64, &info, &size, &fb_base); 787 status = __gop_query64(gop64, &info, &size, &current_fb_base);
785 if (status == EFI_SUCCESS && (!first_gop || conout_found)) { 788 if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
786 /* 789 /*
787 * Systems that use the UEFI Console Splitter may 790 * Systems that use the UEFI Console Splitter may
@@ -795,6 +798,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
795 pixel_format = info->pixel_format; 798 pixel_format = info->pixel_format;
796 pixel_info = info->pixel_information; 799 pixel_info = info->pixel_information;
797 pixels_per_scan_line = info->pixels_per_scan_line; 800 pixels_per_scan_line = info->pixels_per_scan_line;
801 fb_base = current_fb_base;
798 802
799 /* 803 /*
800 * Once we've found a GOP supporting ConOut, 804 * Once we've found a GOP supporting ConOut,
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index e4661196994e..ff8b9a17dc4b 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -27,12 +27,11 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
27 function. */ 27 function. */
28 28
29#define __HAVE_ARCH_MEMCPY 1 29#define __HAVE_ARCH_MEMCPY 1
30extern void *memcpy(void *to, const void *from, size_t len);
30extern void *__memcpy(void *to, const void *from, size_t len); 31extern void *__memcpy(void *to, const void *from, size_t len);
31 32
32#ifndef CONFIG_KMEMCHECK 33#ifndef CONFIG_KMEMCHECK
33#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 34#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
34extern void *memcpy(void *to, const void *from, size_t len);
35#else
36#define memcpy(dst, src, len) \ 35#define memcpy(dst, src, len) \
37({ \ 36({ \
38 size_t __len = (len); \ 37 size_t __len = (len); \
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 5c60bb162622..bb6bfc01cb82 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2907,6 +2907,7 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
2907 struct irq_data *irq_data; 2907 struct irq_data *irq_data;
2908 struct mp_chip_data *data; 2908 struct mp_chip_data *data;
2909 struct irq_alloc_info *info = arg; 2909 struct irq_alloc_info *info = arg;
2910 unsigned long flags;
2910 2911
2911 if (!info || nr_irqs > 1) 2912 if (!info || nr_irqs > 1)
2912 return -EINVAL; 2913 return -EINVAL;
@@ -2939,11 +2940,14 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
2939 2940
2940 cfg = irqd_cfg(irq_data); 2941 cfg = irqd_cfg(irq_data);
2941 add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin); 2942 add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);
2943
2944 local_irq_save(flags);
2942 if (info->ioapic_entry) 2945 if (info->ioapic_entry)
2943 mp_setup_entry(cfg, data, info->ioapic_entry); 2946 mp_setup_entry(cfg, data, info->ioapic_entry);
2944 mp_register_handler(virq, data->trigger); 2947 mp_register_handler(virq, data->trigger);
2945 if (virq < nr_legacy_irqs()) 2948 if (virq < nr_legacy_irqs())
2946 legacy_pic->mask(virq); 2949 legacy_pic->mask(virq);
2950 local_irq_restore(flags);
2947 2951
2948 apic_printk(APIC_VERBOSE, KERN_DEBUG 2952 apic_printk(APIC_VERBOSE, KERN_DEBUG
2949 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i Dest:%d)\n", 2953 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i Dest:%d)\n",
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 39e585a554b7..e28db181e4fc 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -550,14 +550,14 @@ unsigned long get_wchan(struct task_struct *p)
550 if (sp < bottom || sp > top) 550 if (sp < bottom || sp > top)
551 return 0; 551 return 0;
552 552
553 fp = READ_ONCE(*(unsigned long *)sp); 553 fp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
554 do { 554 do {
555 if (fp < bottom || fp > top) 555 if (fp < bottom || fp > top)
556 return 0; 556 return 0;
557 ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long))); 557 ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
558 if (!in_sched_functions(ip)) 558 if (!in_sched_functions(ip))
559 return ip; 559 return ip;
560 fp = READ_ONCE(*(unsigned long *)fp); 560 fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
561 } while (count++ < 16 && p->state != TASK_RUNNING); 561 } while (count++ < 16 && p->state != TASK_RUNNING);
562 return 0; 562 return 0;
563} 563}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index fdb7f2a2d328..a3cccbfc5f77 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1173,6 +1173,14 @@ void __init setup_arch(char **cmdline_p)
1173 clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, 1173 clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
1174 swapper_pg_dir + KERNEL_PGD_BOUNDARY, 1174 swapper_pg_dir + KERNEL_PGD_BOUNDARY,
1175 KERNEL_PGD_PTRS); 1175 KERNEL_PGD_PTRS);
1176
1177 /*
1178 * sync back low identity map too. It is used for example
1179 * in the 32-bit EFI stub.
1180 */
1181 clone_pgd_range(initial_page_table,
1182 swapper_pg_dir + KERNEL_PGD_BOUNDARY,
1183 KERNEL_PGD_PTRS);
1176#endif 1184#endif
1177 1185
1178 tboot_probe(); 1186 tboot_probe();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index e0c198e5f920..892ee2e5ecbc 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
509 */ 509 */
510#define UDELAY_10MS_DEFAULT 10000 510#define UDELAY_10MS_DEFAULT 10000
511 511
512static unsigned int init_udelay = UDELAY_10MS_DEFAULT; 512static unsigned int init_udelay = INT_MAX;
513 513
514static int __init cpu_init_udelay(char *str) 514static int __init cpu_init_udelay(char *str)
515{ 515{
@@ -522,13 +522,16 @@ early_param("cpu_init_udelay", cpu_init_udelay);
522static void __init smp_quirk_init_udelay(void) 522static void __init smp_quirk_init_udelay(void)
523{ 523{
524 /* if cmdline changed it from default, leave it alone */ 524 /* if cmdline changed it from default, leave it alone */
525 if (init_udelay != UDELAY_10MS_DEFAULT) 525 if (init_udelay != INT_MAX)
526 return; 526 return;
527 527
528 /* if modern processor, use no delay */ 528 /* if modern processor, use no delay */
529 if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) || 529 if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
530 ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) 530 ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
531 init_udelay = 0; 531 init_udelay = 0;
532
533 /* else, use legacy delay */
534 init_udelay = UDELAY_10MS_DEFAULT;
532} 535}
533 536
534/* 537/*
@@ -657,7 +660,9 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
657 /* 660 /*
658 * Give the other CPU some time to accept the IPI. 661 * Give the other CPU some time to accept the IPI.
659 */ 662 */
660 if (init_udelay) 663 if (init_udelay == 0)
664 udelay(10);
665 else
661 udelay(300); 666 udelay(300);
662 667
663 pr_debug("Startup point 1\n"); 668 pr_debug("Startup point 1\n");
@@ -668,7 +673,9 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
668 /* 673 /*
669 * Give the other CPU some time to accept the IPI. 674 * Give the other CPU some time to accept the IPI.
670 */ 675 */
671 if (init_udelay) 676 if (init_udelay == 0)
677 udelay(10);
678 else
672 udelay(200); 679 udelay(200);
673 680
674 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 681 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
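
The init_udelay change is a sentinel pattern: the variable starts at INT_MAX ("nothing specified"), the cpu_init_udelay= early param overwrites it, and smp_quirk_init_udelay() only chooses a value if the sentinel is still in place. A minimal, self-contained sketch of that pattern; the names are illustrative, not kernel API, and it assumes the same "command line wins over quirk" policy:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define UDELAY_DEFAULT 10000
#define UDELAY_UNSET   INT_MAX		/* sentinel: no override seen yet */

static unsigned int init_udelay = UDELAY_UNSET;

/* analogue of the early_param handler: a user-provided value always sticks */
static void parse_override(const char *arg)
{
	if (arg)
		init_udelay = (unsigned int)strtoul(arg, NULL, 0);
}

/* analogue of smp_quirk_init_udelay(): runs only if nothing overrode it */
static void apply_quirk(int modern_cpu)
{
	if (init_udelay != UDELAY_UNSET)
		return;			/* command line changed it, leave it */
	init_udelay = modern_cpu ? 0 : UDELAY_DEFAULT;
}

int main(int argc, char **argv)
{
	parse_override(argc > 1 ? argv[1] : NULL);
	apply_quirk(1);
	printf("init_udelay = %u\n", init_udelay);
	return 0;
}

The point of no longer initialising to UDELAY_10MS_DEFAULT is that an explicit "cpu_init_udelay=10000" becomes distinguishable from "nothing specified".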
diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
index 9701a4fd7bf2..836a1eb5df43 100644
--- a/arch/x86/um/ldt.c
+++ b/arch/x86/um/ldt.c
@@ -12,7 +12,10 @@
12#include <skas.h> 12#include <skas.h>
13#include <sysdep/tls.h> 13#include <sysdep/tls.h>
14 14
15extern int modify_ldt(int func, void *ptr, unsigned long bytecount); 15static inline int modify_ldt (int func, void *ptr, unsigned long bytecount)
16{
17 return syscall(__NR_modify_ldt, func, ptr, bytecount);
18}
16 19
17static long write_ldt_entry(struct mm_id *mm_idp, int func, 20static long write_ldt_entry(struct mm_id *mm_idp, int func,
18 struct user_desc *desc, void **addr, int done) 21 struct user_desc *desc, void **addr, int done)
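
The um/ldt.c hunk replaces an extern declaration with an inline wrapper around syscall(), since glibc does not export modify_ldt(). A user-space sketch of the same wrapper, assuming an x86 Linux host (modify_ldt is x86-only); func 0 reads the current LDT:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

/* glibc provides no modify_ldt() stub, so issue the raw syscall */
static int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
	return syscall(__NR_modify_ldt, func, ptr, bytecount);
}

int main(void)
{
	unsigned char ldt[4096];
	int n;

	memset(ldt, 0, sizeof(ldt));
	n = modify_ldt(0, ldt, sizeof(ldt));	/* read the LDT */

	printf("modify_ldt(read) returned %d bytes\n", n);
	return n < 0 ? 1 : 0;
}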
diff --git a/block/blk-core.c b/block/blk-core.c
index 2eb722d48773..18e92a6645e2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -576,7 +576,7 @@ void blk_cleanup_queue(struct request_queue *q)
576 q->queue_lock = &q->__queue_lock; 576 q->queue_lock = &q->__queue_lock;
577 spin_unlock_irq(lock); 577 spin_unlock_irq(lock);
578 578
579 bdi_destroy(&q->backing_dev_info); 579 bdi_unregister(&q->backing_dev_info);
580 580
581 /* @q is and will stay empty, shutdown and put */ 581 /* @q is and will stay empty, shutdown and put */
582 blk_put_queue(q); 582 blk_put_queue(q);
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index ed96474d75cb..ec2d11915142 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -641,6 +641,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
641{ 641{
642 bt_free(&tags->bitmap_tags); 642 bt_free(&tags->bitmap_tags);
643 bt_free(&tags->breserved_tags); 643 bt_free(&tags->breserved_tags);
644 free_cpumask_var(tags->cpumask);
644 kfree(tags); 645 kfree(tags);
645} 646}
646 647
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7785ae96267a..85f014327342 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2296,10 +2296,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2296 int i; 2296 int i;
2297 2297
2298 for (i = 0; i < set->nr_hw_queues; i++) { 2298 for (i = 0; i < set->nr_hw_queues; i++) {
2299 if (set->tags[i]) { 2299 if (set->tags[i])
2300 blk_mq_free_rq_map(set, set->tags[i], i); 2300 blk_mq_free_rq_map(set, set->tags[i], i);
2301 free_cpumask_var(set->tags[i]->cpumask);
2302 }
2303 } 2301 }
2304 2302
2305 kfree(set->tags); 2303 kfree(set->tags);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3e44a9da2a13..07b42f5ad797 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -540,6 +540,7 @@ static void blk_release_queue(struct kobject *kobj)
540 struct request_queue *q = 540 struct request_queue *q =
541 container_of(kobj, struct request_queue, kobj); 541 container_of(kobj, struct request_queue, kobj);
542 542
543 bdi_exit(&q->backing_dev_info);
543 blkcg_exit_queue(q); 544 blkcg_exit_queue(q);
544 545
545 if (q->elevator) { 546 if (q->elevator) {
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 950fff9ce453..a12ff9863d7e 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -187,7 +187,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
187 * global one. Requires architecture specific dev_get_cma_area() helper 187 * global one. Requires architecture specific dev_get_cma_area() helper
188 * function. 188 * function.
189 */ 189 */
190struct page *dma_alloc_from_contiguous(struct device *dev, int count, 190struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
191 unsigned int align) 191 unsigned int align)
192{ 192{
193 if (align > CONFIG_CMA_ALIGNMENT) 193 if (align > CONFIG_CMA_ALIGNMENT)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 293495a75d3d..1b87623381e2 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -60,6 +60,7 @@ struct nbd_device {
60 bool disconnect; /* a disconnect has been requested by user */ 60 bool disconnect; /* a disconnect has been requested by user */
61 61
62 struct timer_list timeout_timer; 62 struct timer_list timeout_timer;
63 spinlock_t tasks_lock;
63 struct task_struct *task_recv; 64 struct task_struct *task_recv;
64 struct task_struct *task_send; 65 struct task_struct *task_send;
65 66
@@ -140,21 +141,23 @@ static void sock_shutdown(struct nbd_device *nbd)
140static void nbd_xmit_timeout(unsigned long arg) 141static void nbd_xmit_timeout(unsigned long arg)
141{ 142{
142 struct nbd_device *nbd = (struct nbd_device *)arg; 143 struct nbd_device *nbd = (struct nbd_device *)arg;
143 struct task_struct *task; 144 unsigned long flags;
144 145
145 if (list_empty(&nbd->queue_head)) 146 if (list_empty(&nbd->queue_head))
146 return; 147 return;
147 148
148 nbd->disconnect = true; 149 nbd->disconnect = true;
149 150
150 task = READ_ONCE(nbd->task_recv); 151 spin_lock_irqsave(&nbd->tasks_lock, flags);
151 if (task) 152
152 force_sig(SIGKILL, task); 153 if (nbd->task_recv)
154 force_sig(SIGKILL, nbd->task_recv);
153 155
154 task = READ_ONCE(nbd->task_send); 156 if (nbd->task_send)
155 if (task)
156 force_sig(SIGKILL, nbd->task_send); 157 force_sig(SIGKILL, nbd->task_send);
157 158
159 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
160
158 dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n"); 161 dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
159} 162}
160 163
@@ -403,17 +406,24 @@ static int nbd_thread_recv(struct nbd_device *nbd)
403{ 406{
404 struct request *req; 407 struct request *req;
405 int ret; 408 int ret;
409 unsigned long flags;
406 410
407 BUG_ON(nbd->magic != NBD_MAGIC); 411 BUG_ON(nbd->magic != NBD_MAGIC);
408 412
409 sk_set_memalloc(nbd->sock->sk); 413 sk_set_memalloc(nbd->sock->sk);
410 414
415 spin_lock_irqsave(&nbd->tasks_lock, flags);
411 nbd->task_recv = current; 416 nbd->task_recv = current;
417 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
412 418
413 ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr); 419 ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
414 if (ret) { 420 if (ret) {
415 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); 421 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
422
423 spin_lock_irqsave(&nbd->tasks_lock, flags);
416 nbd->task_recv = NULL; 424 nbd->task_recv = NULL;
425 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
426
417 return ret; 427 return ret;
418 } 428 }
419 429
@@ -429,7 +439,9 @@ static int nbd_thread_recv(struct nbd_device *nbd)
429 439
430 device_remove_file(disk_to_dev(nbd->disk), &pid_attr); 440 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
431 441
442 spin_lock_irqsave(&nbd->tasks_lock, flags);
432 nbd->task_recv = NULL; 443 nbd->task_recv = NULL;
444 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
433 445
434 if (signal_pending(current)) { 446 if (signal_pending(current)) {
435 siginfo_t info; 447 siginfo_t info;
@@ -534,8 +546,11 @@ static int nbd_thread_send(void *data)
534{ 546{
535 struct nbd_device *nbd = data; 547 struct nbd_device *nbd = data;
536 struct request *req; 548 struct request *req;
549 unsigned long flags;
537 550
551 spin_lock_irqsave(&nbd->tasks_lock, flags);
538 nbd->task_send = current; 552 nbd->task_send = current;
553 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
539 554
540 set_user_nice(current, MIN_NICE); 555 set_user_nice(current, MIN_NICE);
541 while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) { 556 while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
@@ -572,7 +587,15 @@ static int nbd_thread_send(void *data)
572 nbd_handle_req(nbd, req); 587 nbd_handle_req(nbd, req);
573 } 588 }
574 589
590 spin_lock_irqsave(&nbd->tasks_lock, flags);
575 nbd->task_send = NULL; 591 nbd->task_send = NULL;
592 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
593
594 /* Clear maybe pending signals */
595 if (signal_pending(current)) {
596 siginfo_t info;
597 dequeue_signal_lock(current, &current->blocked, &info);
598 }
576 599
577 return 0; 600 return 0;
578} 601}
@@ -1052,6 +1075,7 @@ static int __init nbd_init(void)
1052 nbd_dev[i].magic = NBD_MAGIC; 1075 nbd_dev[i].magic = NBD_MAGIC;
1053 INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); 1076 INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
1054 spin_lock_init(&nbd_dev[i].queue_lock); 1077 spin_lock_init(&nbd_dev[i].queue_lock);
1078 spin_lock_init(&nbd_dev[i].tasks_lock);
1055 INIT_LIST_HEAD(&nbd_dev[i].queue_head); 1079 INIT_LIST_HEAD(&nbd_dev[i].queue_head);
1056 mutex_init(&nbd_dev[i].tx_lock); 1080 mutex_init(&nbd_dev[i].tx_lock);
1057 init_timer(&nbd_dev[i].timeout_timer); 1081 init_timer(&nbd_dev[i].timeout_timer);
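
The nbd change adds tasks_lock so the timeout path never signals a task_recv/task_send pointer that is concurrently being cleared by the exiting thread. A pthread sketch of that "publish and clear a pointer under a lock, act on it only under the same lock" shape, assuming ordinary user-space threads rather than the kernel timer/kthread machinery (build with -pthread):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t tasks_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t *task_recv;		/* protected by tasks_lock */

static void *receiver(void *arg)
{
	pthread_t self = pthread_self();

	pthread_mutex_lock(&tasks_lock);
	task_recv = &self;		/* publish ourselves */
	pthread_mutex_unlock(&tasks_lock);

	sleep(1);			/* stand-in for the receive loop */

	pthread_mutex_lock(&tasks_lock);
	task_recv = NULL;		/* unpublish before exiting */
	pthread_mutex_unlock(&tasks_lock);
	return NULL;
}

/* analogue of nbd_xmit_timeout(): only signal while holding the lock */
static void timeout_handler(void)
{
	pthread_mutex_lock(&tasks_lock);
	if (task_recv)
		pthread_kill(*task_recv, SIGUSR1);
	pthread_mutex_unlock(&tasks_lock);
}

int main(void)
{
	pthread_t t;

	signal(SIGUSR1, SIG_IGN);	/* keep the demo alive when signalled */
	pthread_create(&t, NULL, receiver, NULL);
	timeout_handler();
	pthread_join(t, NULL);
	return 0;
}

Without the lock, the timer could read the pointer just as the thread clears it and exits, which is exactly the race the spinlock closes in the driver.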
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 6f04771f1019..ccc0c1f93daa 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -603,27 +603,31 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
603 struct nvme_iod *iod = ctx; 603 struct nvme_iod *iod = ctx;
604 struct request *req = iod_get_private(iod); 604 struct request *req = iod_get_private(iod);
605 struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req); 605 struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
606
607 u16 status = le16_to_cpup(&cqe->status) >> 1; 606 u16 status = le16_to_cpup(&cqe->status) >> 1;
607 bool requeue = false;
608 int error = 0;
608 609
609 if (unlikely(status)) { 610 if (unlikely(status)) {
610 if (!(status & NVME_SC_DNR || blk_noretry_request(req)) 611 if (!(status & NVME_SC_DNR || blk_noretry_request(req))
611 && (jiffies - req->start_time) < req->timeout) { 612 && (jiffies - req->start_time) < req->timeout) {
612 unsigned long flags; 613 unsigned long flags;
613 614
615 requeue = true;
614 blk_mq_requeue_request(req); 616 blk_mq_requeue_request(req);
615 spin_lock_irqsave(req->q->queue_lock, flags); 617 spin_lock_irqsave(req->q->queue_lock, flags);
616 if (!blk_queue_stopped(req->q)) 618 if (!blk_queue_stopped(req->q))
617 blk_mq_kick_requeue_list(req->q); 619 blk_mq_kick_requeue_list(req->q);
618 spin_unlock_irqrestore(req->q->queue_lock, flags); 620 spin_unlock_irqrestore(req->q->queue_lock, flags);
619 return; 621 goto release_iod;
620 } 622 }
621 623
622 if (req->cmd_type == REQ_TYPE_DRV_PRIV) { 624 if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
623 if (cmd_rq->ctx == CMD_CTX_CANCELLED) 625 if (cmd_rq->ctx == CMD_CTX_CANCELLED)
624 status = -EINTR; 626 error = -EINTR;
627 else
628 error = status;
625 } else { 629 } else {
626 status = nvme_error_status(status); 630 error = nvme_error_status(status);
627 } 631 }
628 } 632 }
629 633
@@ -635,8 +639,9 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
635 if (cmd_rq->aborted) 639 if (cmd_rq->aborted)
636 dev_warn(nvmeq->dev->dev, 640 dev_warn(nvmeq->dev->dev,
637 "completing aborted command with status:%04x\n", 641 "completing aborted command with status:%04x\n",
638 status); 642 error);
639 643
644release_iod:
640 if (iod->nents) { 645 if (iod->nents) {
641 dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents, 646 dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
642 rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 647 rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -649,7 +654,8 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
649 } 654 }
650 nvme_free_iod(nvmeq->dev, iod); 655 nvme_free_iod(nvmeq->dev, iod);
651 656
652 blk_mq_complete_request(req, status); 657 if (likely(!requeue))
658 blk_mq_complete_request(req, error);
653} 659}
654 660
655/* length is in bytes. gfp flags indicates whether we may sleep. */ 661/* length is in bytes. gfp flags indicates whether we may sleep. */
@@ -1804,7 +1810,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1804 1810
1805 length = (io.nblocks + 1) << ns->lba_shift; 1811 length = (io.nblocks + 1) << ns->lba_shift;
1806 meta_len = (io.nblocks + 1) * ns->ms; 1812 meta_len = (io.nblocks + 1) * ns->ms;
1807 metadata = (void __user *)(unsigned long)io.metadata; 1813 metadata = (void __user *)(uintptr_t)io.metadata;
1808 write = io.opcode & 1; 1814 write = io.opcode & 1;
1809 1815
1810 if (ns->ext) { 1816 if (ns->ext) {
@@ -1844,7 +1850,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1844 c.rw.metadata = cpu_to_le64(meta_dma); 1850 c.rw.metadata = cpu_to_le64(meta_dma);
1845 1851
1846 status = __nvme_submit_sync_cmd(ns->queue, &c, NULL, 1852 status = __nvme_submit_sync_cmd(ns->queue, &c, NULL,
1847 (void __user *)io.addr, length, NULL, 0); 1853 (void __user *)(uintptr_t)io.addr, length, NULL, 0);
1848 unmap: 1854 unmap:
1849 if (meta) { 1855 if (meta) {
1850 if (status == NVME_SC_SUCCESS && !write) { 1856 if (status == NVME_SC_SUCCESS && !write) {
@@ -1886,7 +1892,7 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
1886 timeout = msecs_to_jiffies(cmd.timeout_ms); 1892 timeout = msecs_to_jiffies(cmd.timeout_ms);
1887 1893
1888 status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c, 1894 status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c,
1889 NULL, (void __user *)cmd.addr, cmd.data_len, 1895 NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
1890 &cmd.result, timeout); 1896 &cmd.result, timeout);
1891 if (status >= 0) { 1897 if (status >= 0) {
1892 if (put_user(cmd.result, &ucmd->result)) 1898 if (put_user(cmd.result, &ucmd->result))
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index f5e49b639818..6f26cf38c6f9 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -96,6 +96,8 @@ static int atomic_dec_return_safe(atomic_t *v)
96#define RBD_MINORS_PER_MAJOR 256 96#define RBD_MINORS_PER_MAJOR 256
97#define RBD_SINGLE_MAJOR_PART_SHIFT 4 97#define RBD_SINGLE_MAJOR_PART_SHIFT 4
98 98
99#define RBD_MAX_PARENT_CHAIN_LEN 16
100
99#define RBD_SNAP_DEV_NAME_PREFIX "snap_" 101#define RBD_SNAP_DEV_NAME_PREFIX "snap_"
100#define RBD_MAX_SNAP_NAME_LEN \ 102#define RBD_MAX_SNAP_NAME_LEN \
101 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1)) 103 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
@@ -426,7 +428,7 @@ static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
426 size_t count); 428 size_t count);
427static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf, 429static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
428 size_t count); 430 size_t count);
429static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping); 431static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
430static void rbd_spec_put(struct rbd_spec *spec); 432static void rbd_spec_put(struct rbd_spec *spec);
431 433
432static int rbd_dev_id_to_minor(int dev_id) 434static int rbd_dev_id_to_minor(int dev_id)
@@ -5131,44 +5133,51 @@ out_err:
5131 return ret; 5133 return ret;
5132} 5134}
5133 5135
5134static int rbd_dev_probe_parent(struct rbd_device *rbd_dev) 5136/*
5137 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5138 * rbd_dev_image_probe() recursion depth, which means it's also the
5139 * length of the already discovered part of the parent chain.
5140 */
5141static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
5135{ 5142{
5136 struct rbd_device *parent = NULL; 5143 struct rbd_device *parent = NULL;
5137 struct rbd_spec *parent_spec;
5138 struct rbd_client *rbdc;
5139 int ret; 5144 int ret;
5140 5145
5141 if (!rbd_dev->parent_spec) 5146 if (!rbd_dev->parent_spec)
5142 return 0; 5147 return 0;
5143 /*
5144 * We need to pass a reference to the client and the parent
5145 * spec when creating the parent rbd_dev. Images related by
5146 * parent/child relationships always share both.
5147 */
5148 parent_spec = rbd_spec_get(rbd_dev->parent_spec);
5149 rbdc = __rbd_get_client(rbd_dev->rbd_client);
5150 5148
5151 ret = -ENOMEM; 5149 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5152 parent = rbd_dev_create(rbdc, parent_spec, NULL); 5150 pr_info("parent chain is too long (%d)\n", depth);
5153 if (!parent) 5151 ret = -EINVAL;
5152 goto out_err;
5153 }
5154
5155 parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
5156 NULL);
5157 if (!parent) {
5158 ret = -ENOMEM;
5154 goto out_err; 5159 goto out_err;
5160 }
5161
5162 /*
5163 * Images related by parent/child relationships always share
5164 * rbd_client and spec/parent_spec, so bump their refcounts.
5165 */
5166 __rbd_get_client(rbd_dev->rbd_client);
5167 rbd_spec_get(rbd_dev->parent_spec);
5155 5168
5156 ret = rbd_dev_image_probe(parent, false); 5169 ret = rbd_dev_image_probe(parent, depth);
5157 if (ret < 0) 5170 if (ret < 0)
5158 goto out_err; 5171 goto out_err;
5172
5159 rbd_dev->parent = parent; 5173 rbd_dev->parent = parent;
5160 atomic_set(&rbd_dev->parent_ref, 1); 5174 atomic_set(&rbd_dev->parent_ref, 1);
5161
5162 return 0; 5175 return 0;
5176
5163out_err: 5177out_err:
5164 if (parent) { 5178 rbd_dev_unparent(rbd_dev);
5165 rbd_dev_unparent(rbd_dev); 5179 if (parent)
5166 rbd_dev_destroy(parent); 5180 rbd_dev_destroy(parent);
5167 } else {
5168 rbd_put_client(rbdc);
5169 rbd_spec_put(parent_spec);
5170 }
5171
5172 return ret; 5181 return ret;
5173} 5182}
5174 5183
@@ -5286,7 +5295,7 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5286 * parent), initiate a watch on its header object before using that 5295 * parent), initiate a watch on its header object before using that
5287 * object to get detailed information about the rbd image. 5296 * object to get detailed information about the rbd image.
5288 */ 5297 */
5289static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) 5298static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
5290{ 5299{
5291 int ret; 5300 int ret;
5292 5301
@@ -5304,7 +5313,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
5304 if (ret) 5313 if (ret)
5305 goto err_out_format; 5314 goto err_out_format;
5306 5315
5307 if (mapping) { 5316 if (!depth) {
5308 ret = rbd_dev_header_watch_sync(rbd_dev); 5317 ret = rbd_dev_header_watch_sync(rbd_dev);
5309 if (ret) { 5318 if (ret) {
5310 if (ret == -ENOENT) 5319 if (ret == -ENOENT)
@@ -5325,7 +5334,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
5325 * Otherwise this is a parent image, identified by pool, image 5334 * Otherwise this is a parent image, identified by pool, image
5326 * and snap ids - need to fill in names for those ids. 5335 * and snap ids - need to fill in names for those ids.
5327 */ 5336 */
5328 if (mapping) 5337 if (!depth)
5329 ret = rbd_spec_fill_snap_id(rbd_dev); 5338 ret = rbd_spec_fill_snap_id(rbd_dev);
5330 else 5339 else
5331 ret = rbd_spec_fill_names(rbd_dev); 5340 ret = rbd_spec_fill_names(rbd_dev);
@@ -5347,12 +5356,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
5347 * Need to warn users if this image is the one being 5356 * Need to warn users if this image is the one being
5348 * mapped and has a parent. 5357 * mapped and has a parent.
5349 */ 5358 */
5350 if (mapping && rbd_dev->parent_spec) 5359 if (!depth && rbd_dev->parent_spec)
5351 rbd_warn(rbd_dev, 5360 rbd_warn(rbd_dev,
5352 "WARNING: kernel layering is EXPERIMENTAL!"); 5361 "WARNING: kernel layering is EXPERIMENTAL!");
5353 } 5362 }
5354 5363
5355 ret = rbd_dev_probe_parent(rbd_dev); 5364 ret = rbd_dev_probe_parent(rbd_dev, depth);
5356 if (ret) 5365 if (ret)
5357 goto err_out_probe; 5366 goto err_out_probe;
5358 5367
@@ -5363,7 +5372,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
5363err_out_probe: 5372err_out_probe:
5364 rbd_dev_unprobe(rbd_dev); 5373 rbd_dev_unprobe(rbd_dev);
5365err_out_watch: 5374err_out_watch:
5366 if (mapping) 5375 if (!depth)
5367 rbd_dev_header_unwatch_sync(rbd_dev); 5376 rbd_dev_header_unwatch_sync(rbd_dev);
5368out_header_name: 5377out_header_name:
5369 kfree(rbd_dev->header_name); 5378 kfree(rbd_dev->header_name);
@@ -5426,7 +5435,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
5426 spec = NULL; /* rbd_dev now owns this */ 5435 spec = NULL; /* rbd_dev now owns this */
5427 rbd_opts = NULL; /* rbd_dev now owns this */ 5436 rbd_opts = NULL; /* rbd_dev now owns this */
5428 5437
5429 rc = rbd_dev_image_probe(rbd_dev, true); 5438 rc = rbd_dev_image_probe(rbd_dev, 0);
5430 if (rc < 0) 5439 if (rc < 0)
5431 goto err_out_rbd_dev; 5440 goto err_out_rbd_dev;
5432 5441
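
The rbd change turns the boolean "mapping" flag into a recursion depth and caps the parent chain at RBD_MAX_PARENT_CHAIN_LEN, so a cyclic or absurdly deep clone chain cannot exhaust the kernel stack. A self-contained sketch of the same guard, assuming an in-memory parent chain in place of the RADOS metadata lookups:

#include <errno.h>
#include <stdio.h>

#define MAX_PARENT_CHAIN_LEN 16

struct image {
	const char *name;
	struct image *parent_spec;	/* NULL when the image has no parent */
};

/* analogue of rbd_dev_image_probe() -> rbd_dev_probe_parent(): @depth is
 * how many parents have already been walked; 0 means the mapped image. */
static int probe_image(struct image *img, int depth)
{
	if (!img->parent_spec)
		return 0;

	if (++depth > MAX_PARENT_CHAIN_LEN) {
		fprintf(stderr, "parent chain is too long (%d)\n", depth);
		return -EINVAL;
	}

	return probe_image(img->parent_spec, depth);
}

int main(void)
{
	struct image base = { "base", NULL };
	struct image clone = { "clone", &base };
	struct image looped = { "looped", NULL };

	looped.parent_spec = &looped;	/* a cycle: would recurse forever */

	printf("clone:  %d\n", probe_image(&clone, 0));
	printf("looped: %d\n", probe_image(&looped, 0));
	return 0;
}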
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 611170896b8c..a69c02dadec0 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1956,7 +1956,8 @@ static void blkback_changed(struct xenbus_device *dev,
1956 break; 1956 break;
1957 /* Missed the backend's Closing state -- fallthrough */ 1957 /* Missed the backend's Closing state -- fallthrough */
1958 case XenbusStateClosing: 1958 case XenbusStateClosing:
1959 blkfront_closing(info); 1959 if (info)
1960 blkfront_closing(info);
1960 break; 1961 break;
1961 } 1962 }
1962} 1963}
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 7d9879e166cf..7082c7268845 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -1184,11 +1184,12 @@ static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb,
1184 if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) 1184 if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
1185 break; 1185 break;
1186 target = cpumask_any_but(cpu_online_mask, cpu); 1186 target = cpumask_any_but(cpu_online_mask, cpu);
1187 if (target < 0) 1187 if (target >= nr_cpu_ids)
1188 break; 1188 break;
1189 perf_pmu_migrate_context(&dt->pmu, cpu, target); 1189 perf_pmu_migrate_context(&dt->pmu, cpu, target);
1190 cpumask_set_cpu(target, &dt->cpu); 1190 cpumask_set_cpu(target, &dt->cpu);
1191 WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0); 1191 if (ccn->irq)
1192 WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0);
1192 default: 1193 default:
1193 break; 1194 break;
1194 } 1195 }
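
The arm-ccn fix matters because cpumask_any_but() returns an unsigned value: when no other CPU is online it yields a number >= nr_cpu_ids, never a negative, so the old "target < 0" test could not fire. A tiny sketch of that class of bug, assuming a find-style helper that reports "not found" as a too-large index:

#include <stdio.h>

#define NR_IDS 4u

/* returns an index < NR_IDS on success, NR_IDS when nothing else matches */
static unsigned int find_other(unsigned int excluded, unsigned int present_mask)
{
	unsigned int i;

	for (i = 0; i < NR_IDS; i++)
		if (i != excluded && (present_mask & (1u << i)))
			return i;
	return NR_IDS;
}

int main(void)
{
	unsigned int target = find_other(0, 0x1);	/* only id 0 present */

	if (target < 0)			/* the old check: never true, target is unsigned */
		printf("old check: no target\n");

	if (target >= NR_IDS)		/* the corrected check */
		printf("new check: no target (got %u)\n", target);
	return 0;
}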
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index efed11509f4a..ed2bbe5b10af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -294,10 +294,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
294 struct amdgpu_device *adev = dev_get_drvdata(dev); 294 struct amdgpu_device *adev = dev_get_drvdata(dev);
295 umode_t effective_mode = attr->mode; 295 umode_t effective_mode = attr->mode;
296 296
297 /* Skip limit attributes if DPM is not enabled */ 297 /* Skip attributes if DPM is not enabled */
298 if (!adev->pm.dpm_enabled && 298 if (!adev->pm.dpm_enabled &&
299 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 299 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
300 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) 300 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
301 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
302 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
303 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
304 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
301 return 0; 305 return 0;
302 306
303 /* Skip fan attributes if fan is not present */ 307 /* Skip fan attributes if fan is not present */
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 9745ed3a9aef..7e9154c7f1db 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2997,6 +2997,9 @@ static int kv_dpm_late_init(void *handle)
2997 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2997 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2998 int ret; 2998 int ret;
2999 2999
3000 if (!amdgpu_dpm)
3001 return 0;
3002
3000 /* init the sysfs and debugfs files late */ 3003 /* init the sysfs and debugfs files late */
3001 ret = amdgpu_pm_sysfs_init(adev); 3004 ret = amdgpu_pm_sysfs_init(adev);
3002 if (ret) 3005 if (ret)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 5bca390d9ae2..809959d56d78 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1194,17 +1194,18 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
1194 1194
1195 list_for_each_entry(port, &mstb->ports, next) { 1195 list_for_each_entry(port, &mstb->ports, next) {
1196 if (port->port_num == port_num) { 1196 if (port->port_num == port_num) {
1197 if (!port->mstb) { 1197 mstb = port->mstb;
1198 if (!mstb) {
1198 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); 1199 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
1199 return NULL; 1200 goto out;
1200 } 1201 }
1201 1202
1202 mstb = port->mstb;
1203 break; 1203 break;
1204 } 1204 }
1205 } 1205 }
1206 } 1206 }
1207 kref_get(&mstb->kref); 1207 kref_get(&mstb->kref);
1208out:
1208 mutex_unlock(&mgr->lock); 1209 mutex_unlock(&mgr->lock);
1209 return mstb; 1210 return mstb;
1210} 1211}
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index f6ecbda2c604..674341708033 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -143,7 +143,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
143} 143}
144 144
145/** 145/**
146 * i915_gem_shrink - Shrink buffer object caches completely 146 * i915_gem_shrink_all - Shrink buffer object caches completely
147 * @dev_priv: i915 device 147 * @dev_priv: i915 device
148 * 148 *
149 * This is a simple wraper around i915_gem_shrink() to aggressively shrink all 149 * This is a simple wraper around i915_gem_shrink() to aggressively shrink all
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 8fd431bcdfd3..a96b9006a51e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -804,7 +804,10 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
804 * Also note, that the object created here is not currently a "first class" 804 * Also note, that the object created here is not currently a "first class"
805 * object, in that several ioctls are banned. These are the CPU access 805 * object, in that several ioctls are banned. These are the CPU access
806 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use 806 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
807 * direct access via your pointer rather than use those ioctls. 807 * direct access via your pointer rather than use those ioctls. Another
808 * restriction is that we do not allow userptr surfaces to be pinned to the
809 * hardware and so we reject any attempt to create a framebuffer out of a
810 * userptr.
808 * 811 *
809 * If you think this is a good interface to use to pass GPU memory between 812 * If you think this is a good interface to use to pass GPU memory between
810 * drivers, please use dma-buf instead. In fact, wherever possible use 813 * drivers, please use dma-buf instead. In fact, wherever possible use
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index cf418be7d30a..b2270d576979 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1724,6 +1724,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
1724 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE); 1724 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1725 } 1725 }
1726 1726
1727 /*
1728 * Apparently we need to have VGA mode enabled prior to changing
1729 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1730 * dividers, even though the register value does change.
1731 */
1732 I915_WRITE(reg, 0);
1733
1734 I915_WRITE(reg, dpll);
1735
1727 /* Wait for the clocks to stabilize. */ 1736 /* Wait for the clocks to stabilize. */
1728 POSTING_READ(reg); 1737 POSTING_READ(reg);
1729 udelay(150); 1738 udelay(150);
@@ -14107,6 +14116,11 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14107 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 14116 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14108 struct drm_i915_gem_object *obj = intel_fb->obj; 14117 struct drm_i915_gem_object *obj = intel_fb->obj;
14109 14118
14119 if (obj->userptr.mm) {
14120 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14121 return -EINVAL;
14122 }
14123
14110 return drm_gem_handle_create(file, &obj->base, handle); 14124 return drm_gem_handle_create(file, &obj->base, handle);
14111} 14125}
14112 14126
@@ -14897,9 +14911,19 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
14897 /* restore vblank interrupts to correct state */ 14911 /* restore vblank interrupts to correct state */
14898 drm_crtc_vblank_reset(&crtc->base); 14912 drm_crtc_vblank_reset(&crtc->base);
14899 if (crtc->active) { 14913 if (crtc->active) {
14914 struct intel_plane *plane;
14915
14900 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); 14916 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
14901 update_scanline_offset(crtc); 14917 update_scanline_offset(crtc);
14902 drm_crtc_vblank_on(&crtc->base); 14918 drm_crtc_vblank_on(&crtc->base);
14919
14920 /* Disable everything but the primary plane */
14921 for_each_intel_plane_on_crtc(dev, crtc, plane) {
14922 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
14923 continue;
14924
14925 plane->disable_plane(&plane->base, &crtc->base);
14926 }
14903 } 14927 }
14904 14928
14905 /* We need to sanitize the plane -> pipe mapping first because this will 14929 /* We need to sanitize the plane -> pipe mapping first because this will
@@ -15067,38 +15091,25 @@ void i915_redisable_vga(struct drm_device *dev)
15067 i915_redisable_vga_power_on(dev); 15091 i915_redisable_vga_power_on(dev);
15068} 15092}
15069 15093
15070static bool primary_get_hw_state(struct intel_crtc *crtc) 15094static bool primary_get_hw_state(struct intel_plane *plane)
15071{ 15095{
15072 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 15096 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15073 15097
15074 return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE); 15098 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15075} 15099}
15076 15100
15077static void readout_plane_state(struct intel_crtc *crtc, 15101/* FIXME read out full plane state for all planes */
15078 struct intel_crtc_state *crtc_state) 15102static void readout_plane_state(struct intel_crtc *crtc)
15079{ 15103{
15080 struct intel_plane *p; 15104 struct drm_plane *primary = crtc->base.primary;
15081 struct intel_plane_state *plane_state; 15105 struct intel_plane_state *plane_state =
15082 bool active = crtc_state->base.active; 15106 to_intel_plane_state(primary->state);
15083
15084 for_each_intel_plane(crtc->base.dev, p) {
15085 if (crtc->pipe != p->pipe)
15086 continue;
15087
15088 plane_state = to_intel_plane_state(p->base.state);
15089 15107
15090 if (p->base.type == DRM_PLANE_TYPE_PRIMARY) { 15108 plane_state->visible =
15091 plane_state->visible = primary_get_hw_state(crtc); 15109 primary_get_hw_state(to_intel_plane(primary));
15092 if (plane_state->visible)
15093 crtc->base.state->plane_mask |=
15094 1 << drm_plane_index(&p->base);
15095 } else {
15096 if (active)
15097 p->disable_plane(&p->base, &crtc->base);
15098 15110
15099 plane_state->visible = false; 15111 if (plane_state->visible)
15100 } 15112 crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
15101 }
15102} 15113}
15103 15114
15104static void intel_modeset_readout_hw_state(struct drm_device *dev) 15115static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15121,34 +15132,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
15121 crtc->base.state->active = crtc->active; 15132 crtc->base.state->active = crtc->active;
15122 crtc->base.enabled = crtc->active; 15133 crtc->base.enabled = crtc->active;
15123 15134
15124 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); 15135 readout_plane_state(crtc);
15125 if (crtc->base.state->active) {
15126 intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
15127 intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
15128 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15129
15130 /*
15131 * The initial mode needs to be set in order to keep
15132 * the atomic core happy. It wants a valid mode if the
15133 * crtc's enabled, so we do the above call.
15134 *
15135 * At this point some state updated by the connectors
15136 * in their ->detect() callback has not run yet, so
15137 * no recalculation can be done yet.
15138 *
15139 * Even if we could do a recalculation and modeset
15140 * right now it would cause a double modeset if
15141 * fbdev or userspace chooses a different initial mode.
15142 *
15143 * If that happens, someone indicated they wanted a
15144 * mode change, which means it's safe to do a full
15145 * recalculation.
15146 */
15147 crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
15148 }
15149
15150 crtc->base.hwmode = crtc->config->base.adjusted_mode;
15151 readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state));
15152 15136
15153 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", 15137 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
15154 crtc->base.base.id, 15138 crtc->base.base.id,
@@ -15207,6 +15191,36 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
15207 connector->base.name, 15191 connector->base.name,
15208 connector->base.encoder ? "enabled" : "disabled"); 15192 connector->base.encoder ? "enabled" : "disabled");
15209 } 15193 }
15194
15195 for_each_intel_crtc(dev, crtc) {
15196 crtc->base.hwmode = crtc->config->base.adjusted_mode;
15197
15198 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
15199 if (crtc->base.state->active) {
15200 intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
15201 intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
15202 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15203
15204 /*
15205 * The initial mode needs to be set in order to keep
15206 * the atomic core happy. It wants a valid mode if the
15207 * crtc's enabled, so we do the above call.
15208 *
15209 * At this point some state updated by the connectors
15210 * in their ->detect() callback has not run yet, so
15211 * no recalculation can be done yet.
15212 *
15213 * Even if we could do a recalculation and modeset
15214 * right now it would cause a double modeset if
15215 * fbdev or userspace chooses a different initial mode.
15216 *
15217 * If that happens, someone indicated they wanted a
15218 * mode change, which means it's safe to do a full
15219 * recalculation.
15220 */
15221 crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
15222 }
15223 }
15210} 15224}
15211 15225
15212/* Scan out the current hw modeset state, 15226/* Scan out the current hw modeset state,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7412caedcf7f..29dd4488dc49 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1659,6 +1659,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1659 if (flush_domains) { 1659 if (flush_domains) {
1660 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 1660 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1661 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 1661 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1662 flags |= PIPE_CONTROL_FLUSH_ENABLE;
1662 } 1663 }
1663 1664
1664 if (invalidate_domains) { 1665 if (invalidate_domains) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6e6b8db996ef..61b451fbd09e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
347 if (flush_domains) { 347 if (flush_domains) {
348 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 348 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
349 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 349 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
350 flags |= PIPE_CONTROL_FLUSH_ENABLE;
350 } 351 }
351 if (invalidate_domains) { 352 if (invalidate_domains) {
352 flags |= PIPE_CONTROL_TLB_INVALIDATE; 353 flags |= PIPE_CONTROL_TLB_INVALIDATE;
@@ -418,6 +419,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
418 if (flush_domains) { 419 if (flush_domains) {
419 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 420 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
420 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 421 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
422 flags |= PIPE_CONTROL_FLUSH_ENABLE;
421 } 423 }
422 if (invalidate_domains) { 424 if (invalidate_domains) {
423 flags |= PIPE_CONTROL_TLB_INVALIDATE; 425 flags |= PIPE_CONTROL_TLB_INVALIDATE;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 2c9981512d27..41be584147b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -227,11 +227,12 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
227 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 227 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
228 struct nvkm_vma *vma; 228 struct nvkm_vma *vma;
229 229
230 if (nvbo->bo.mem.mem_type == TTM_PL_TT) 230 if (is_power_of_2(nvbo->valid_domains))
231 rep->domain = nvbo->valid_domains;
232 else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
231 rep->domain = NOUVEAU_GEM_DOMAIN_GART; 233 rep->domain = NOUVEAU_GEM_DOMAIN_GART;
232 else 234 else
233 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; 235 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
234
235 rep->offset = nvbo->bo.offset; 236 rep->offset = nvbo->bo.offset;
236 if (cli->vm) { 237 if (cli->vm) {
237 vma = nouveau_bo_vma_find(nvbo, cli->vm); 238 vma = nouveau_bo_vma_find(nvbo, cli->vm);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 44489cce7458..6a0a176e26ec 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -717,10 +717,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
717 struct radeon_device *rdev = dev_get_drvdata(dev); 717 struct radeon_device *rdev = dev_get_drvdata(dev);
718 umode_t effective_mode = attr->mode; 718 umode_t effective_mode = attr->mode;
719 719
720 /* Skip limit attributes if DPM is not enabled */ 720 /* Skip attributes if DPM is not enabled */
721 if (rdev->pm.pm_method != PM_METHOD_DPM && 721 if (rdev->pm.pm_method != PM_METHOD_DPM &&
722 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 722 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
723 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) 723 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
724 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
725 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
726 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
727 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
724 return 0; 728 return 0;
725 729
726 /* Skip fan attributes if fan is not present */ 730 /* Skip fan attributes if fan is not present */
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index ff30f8806880..fb9311110424 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -149,8 +149,6 @@
149#define ST_ACCEL_4_BDU_MASK 0x40 149#define ST_ACCEL_4_BDU_MASK 0x40
150#define ST_ACCEL_4_DRDY_IRQ_ADDR 0x21 150#define ST_ACCEL_4_DRDY_IRQ_ADDR 0x21
151#define ST_ACCEL_4_DRDY_IRQ_INT1_MASK 0x04 151#define ST_ACCEL_4_DRDY_IRQ_INT1_MASK 0x04
152#define ST_ACCEL_4_IG1_EN_ADDR 0x21
153#define ST_ACCEL_4_IG1_EN_MASK 0x08
154#define ST_ACCEL_4_MULTIREAD_BIT true 152#define ST_ACCEL_4_MULTIREAD_BIT true
155 153
156/* CUSTOM VALUES FOR SENSOR 5 */ 154/* CUSTOM VALUES FOR SENSOR 5 */
@@ -489,10 +487,6 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
489 .drdy_irq = { 487 .drdy_irq = {
490 .addr = ST_ACCEL_4_DRDY_IRQ_ADDR, 488 .addr = ST_ACCEL_4_DRDY_IRQ_ADDR,
491 .mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK, 489 .mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK,
492 .ig1 = {
493 .en_addr = ST_ACCEL_4_IG1_EN_ADDR,
494 .en_mask = ST_ACCEL_4_IG1_EN_MASK,
495 },
496 }, 490 },
497 .multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT, 491 .multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT,
498 .bootime = 2, /* guess */ 492 .bootime = 2, /* guess */
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index ebe415f10640..0c74869a540a 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -45,13 +45,18 @@
45#include <linux/types.h> 45#include <linux/types.h>
46#include <linux/gfp.h> 46#include <linux/gfp.h>
47#include <linux/err.h> 47#include <linux/err.h>
48#include <linux/regulator/consumer.h>
48 49
49#include <linux/iio/iio.h> 50#include <linux/iio/iio.h>
50 51
52#define TWL4030_USB_SEL_MADC_MCPC (1<<3)
53#define TWL4030_USB_CARKIT_ANA_CTRL 0xBB
54
51/** 55/**
52 * struct twl4030_madc_data - a container for madc info 56 * struct twl4030_madc_data - a container for madc info
53 * @dev: Pointer to device structure for madc 57 * @dev: Pointer to device structure for madc
54 * @lock: Mutex protecting this data structure 58 * @lock: Mutex protecting this data structure
59 * @regulator: Pointer to bias regulator for madc
55 * @requests: Array of request struct corresponding to SW1, SW2 and RT 60 * @requests: Array of request struct corresponding to SW1, SW2 and RT
56 * @use_second_irq: IRQ selection (main or co-processor) 61 * @use_second_irq: IRQ selection (main or co-processor)
57 * @imr: Interrupt mask register of MADC 62 * @imr: Interrupt mask register of MADC
@@ -60,6 +65,7 @@
60struct twl4030_madc_data { 65struct twl4030_madc_data {
61 struct device *dev; 66 struct device *dev;
62 struct mutex lock; /* mutex protecting this data structure */ 67 struct mutex lock; /* mutex protecting this data structure */
68 struct regulator *usb3v1;
63 struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS]; 69 struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS];
64 bool use_second_irq; 70 bool use_second_irq;
65 u8 imr; 71 u8 imr;
@@ -841,6 +847,32 @@ static int twl4030_madc_probe(struct platform_device *pdev)
841 } 847 }
842 twl4030_madc = madc; 848 twl4030_madc = madc;
843 849
850 /* Configure MADC[3:6] */
851 ret = twl_i2c_read_u8(TWL_MODULE_USB, &regval,
852 TWL4030_USB_CARKIT_ANA_CTRL);
853 if (ret) {
854 dev_err(&pdev->dev, "unable to read reg CARKIT_ANA_CTRL 0x%X\n",
855 TWL4030_USB_CARKIT_ANA_CTRL);
856 goto err_i2c;
857 }
858 regval |= TWL4030_USB_SEL_MADC_MCPC;
859 ret = twl_i2c_write_u8(TWL_MODULE_USB, regval,
860 TWL4030_USB_CARKIT_ANA_CTRL);
861 if (ret) {
862 dev_err(&pdev->dev, "unable to write reg CARKIT_ANA_CTRL 0x%X\n",
863 TWL4030_USB_CARKIT_ANA_CTRL);
864 goto err_i2c;
865 }
866
867 /* Enable 3v1 bias regulator for MADC[3:6] */
868 madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1");
869 if (IS_ERR(madc->usb3v1))
870 return -ENODEV;
871
872 ret = regulator_enable(madc->usb3v1);
873 if (ret)
874 dev_err(madc->dev, "could not enable 3v1 bias regulator\n");
875
844 ret = iio_device_register(iio_dev); 876 ret = iio_device_register(iio_dev);
845 if (ret) { 877 if (ret) {
846 dev_err(&pdev->dev, "could not register iio device\n"); 878 dev_err(&pdev->dev, "could not register iio device\n");
@@ -866,6 +898,8 @@ static int twl4030_madc_remove(struct platform_device *pdev)
866 twl4030_madc_set_current_generator(madc, 0, 0); 898 twl4030_madc_set_current_generator(madc, 0, 0);
867 twl4030_madc_set_power(madc, 0); 899 twl4030_madc_set_power(madc, 0);
868 900
901 regulator_disable(madc->usb3v1);
902
869 return 0; 903 return 0;
870} 904}
871 905
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 8f66c67ff0df..87471ef37198 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -508,12 +508,12 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
508 memset(&gid_attr, 0, sizeof(gid_attr)); 508 memset(&gid_attr, 0, sizeof(gid_attr));
509 gid_attr.ndev = ndev; 509 gid_attr.ndev = ndev;
510 510
511 mutex_lock(&table->lock);
511 ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT); 512 ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT);
512 513
513 /* Coudn't find default GID location */ 514 /* Coudn't find default GID location */
514 WARN_ON(ix < 0); 515 WARN_ON(ix < 0);
515 516
516 mutex_lock(&table->lock);
517 if (!__ib_cache_gid_get(ib_dev, port, ix, 517 if (!__ib_cache_gid_get(ib_dev, port, ix,
518 &current_gid, &current_gid_attr) && 518 &current_gid, &current_gid_attr) &&
519 mode == IB_CACHE_GID_DEFAULT_MODE_SET && 519 mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index ea4db9c1d44f..4f918b929eca 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -835,6 +835,11 @@ retest:
835 case IB_CM_SIDR_REQ_RCVD: 835 case IB_CM_SIDR_REQ_RCVD:
836 spin_unlock_irq(&cm_id_priv->lock); 836 spin_unlock_irq(&cm_id_priv->lock);
837 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); 837 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
838 spin_lock_irq(&cm.lock);
839 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
840 rb_erase(&cm_id_priv->sidr_id_node,
841 &cm.remote_sidr_table);
842 spin_unlock_irq(&cm.lock);
838 break; 843 break;
839 case IB_CM_REQ_SENT: 844 case IB_CM_REQ_SENT:
840 case IB_CM_MRA_REQ_RCVD: 845 case IB_CM_MRA_REQ_RCVD:
@@ -3172,7 +3177,10 @@ int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3172 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3177 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3173 3178
3174 spin_lock_irqsave(&cm.lock, flags); 3179 spin_lock_irqsave(&cm.lock, flags);
3175 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 3180 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3181 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3182 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3183 }
3176 spin_unlock_irqrestore(&cm.lock, flags); 3184 spin_unlock_irqrestore(&cm.lock, flags);
3177 return 0; 3185 return 0;
3178 3186
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 59a2dafc8c57..36b12d560e17 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1067,14 +1067,14 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
1067 sizeof(req->local_gid)); 1067 sizeof(req->local_gid));
1068 req->has_gid = true; 1068 req->has_gid = true;
1069 req->service_id = req_param->primary_path->service_id; 1069 req->service_id = req_param->primary_path->service_id;
1070 req->pkey = req_param->bth_pkey; 1070 req->pkey = be16_to_cpu(req_param->primary_path->pkey);
1071 break; 1071 break;
1072 case IB_CM_SIDR_REQ_RECEIVED: 1072 case IB_CM_SIDR_REQ_RECEIVED:
1073 req->device = sidr_param->listen_id->device; 1073 req->device = sidr_param->listen_id->device;
1074 req->port = sidr_param->port; 1074 req->port = sidr_param->port;
1075 req->has_gid = false; 1075 req->has_gid = false;
1076 req->service_id = sidr_param->service_id; 1076 req->service_id = sidr_param->service_id;
1077 req->pkey = sidr_param->bth_pkey; 1077 req->pkey = sidr_param->pkey;
1078 break; 1078 break;
1079 default: 1079 default:
1080 return -EINVAL; 1080 return -EINVAL;
@@ -1324,7 +1324,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
1324 bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id), 1324 bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id),
1325 cma_port_from_service_id(req.service_id)); 1325 cma_port_from_service_id(req.service_id));
1326 id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); 1326 id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
1327 if (IS_ERR(id_priv)) { 1327 if (IS_ERR(id_priv) && *net_dev) {
1328 dev_put(*net_dev); 1328 dev_put(*net_dev);
1329 *net_dev = NULL; 1329 *net_dev = NULL;
1330 } 1330 }
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 6b24cba1e474..178f98482e13 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -250,25 +250,44 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
250 u8 port, struct net_device *ndev) 250 u8 port, struct net_device *ndev)
251{ 251{
252 struct in_device *in_dev; 252 struct in_device *in_dev;
253 struct sin_list {
254 struct list_head list;
255 struct sockaddr_in ip;
256 };
257 struct sin_list *sin_iter;
258 struct sin_list *sin_temp;
253 259
260 LIST_HEAD(sin_list);
254 if (ndev->reg_state >= NETREG_UNREGISTERING) 261 if (ndev->reg_state >= NETREG_UNREGISTERING)
255 return; 262 return;
256 263
257 in_dev = in_dev_get(ndev); 264 rcu_read_lock();
258 if (!in_dev) 265 in_dev = __in_dev_get_rcu(ndev);
266 if (!in_dev) {
267 rcu_read_unlock();
259 return; 268 return;
269 }
260 270
261 for_ifa(in_dev) { 271 for_ifa(in_dev) {
262 struct sockaddr_in ip; 272 struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
263 273
264 ip.sin_family = AF_INET; 274 if (!entry) {
265 ip.sin_addr.s_addr = ifa->ifa_address; 275 pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n");
266 update_gid_ip(GID_ADD, ib_dev, port, ndev, 276 continue;
267 (struct sockaddr *)&ip); 277 }
278 entry->ip.sin_family = AF_INET;
279 entry->ip.sin_addr.s_addr = ifa->ifa_address;
280 list_add_tail(&entry->list, &sin_list);
268 } 281 }
269 endfor_ifa(in_dev); 282 endfor_ifa(in_dev);
283 rcu_read_unlock();
270 284
271 in_dev_put(in_dev); 285 list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
286 update_gid_ip(GID_ADD, ib_dev, port, ndev,
287 (struct sockaddr *)&sin_iter->ip);
288 list_del(&sin_iter->list);
289 kfree(sin_iter);
290 }
272} 291}
273 292
274static void enum_netdev_ipv6_ips(struct ib_device *ib_dev, 293static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
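
The roce_gid_mgmt change switches to an RCU walk of the in_device: the IPv4 addresses are copied into a private list while only rcu_read_lock() is held, and the GID updates, which may sleep, run after the lock is dropped. A user-space sketch of that "snapshot under the lock, process outside it" shape, with a plain mutex standing in for RCU (illustrative names, not the verbs API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct addr_entry {
	char addr[16];
	struct addr_entry *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *table[] = { "10.0.0.1", "10.0.0.2", NULL };

/* may sleep or take other locks, so it must run without table_lock held */
static void update_gid(const char *addr)
{
	printf("updating GID for %s\n", addr);
}

static void enum_ips(void)
{
	struct addr_entry *snap = NULL, *e, *next;
	int i;

	pthread_mutex_lock(&table_lock);
	for (i = 0; table[i]; i++) {
		e = calloc(1, sizeof(*e));
		if (!e)
			continue;	/* skip the entry, as the driver does */
		strncpy(e->addr, table[i], sizeof(e->addr) - 1);
		e->next = snap;
		snap = e;
	}
	pthread_mutex_unlock(&table_lock);

	for (e = snap; e; e = next) {	/* heavy work after the unlock */
		next = e->next;
		update_gid(e->addr);
		free(e);
	}
}

int main(void)
{
	enum_ips();
	return 0;
}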
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index a53fc9b01c69..30467d10df91 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1624,11 +1624,16 @@ static int ucma_open(struct inode *inode, struct file *filp)
1624 if (!file) 1624 if (!file)
1625 return -ENOMEM; 1625 return -ENOMEM;
1626 1626
1627 file->close_wq = create_singlethread_workqueue("ucma_close_id");
1628 if (!file->close_wq) {
1629 kfree(file);
1630 return -ENOMEM;
1631 }
1632
1627 INIT_LIST_HEAD(&file->event_list); 1633 INIT_LIST_HEAD(&file->event_list);
1628 INIT_LIST_HEAD(&file->ctx_list); 1634 INIT_LIST_HEAD(&file->ctx_list);
1629 init_waitqueue_head(&file->poll_wait); 1635 init_waitqueue_head(&file->poll_wait);
1630 mutex_init(&file->mut); 1636 mutex_init(&file->mut);
1631 file->close_wq = create_singlethread_workqueue("ucma_close_id");
1632 1637
1633 filp->private_data = file; 1638 filp->private_data = file;
1634 file->filp = filp; 1639 file->filp = filp;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 35365f046923..d65cf42399e8 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2115,15 +2115,19 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2115 return -ENOMEM; 2115 return -ENOMEM;
2116 /* It is large page*/ 2116 /* It is large page*/
2117 if (largepage_lvl > 1) { 2117 if (largepage_lvl > 1) {
2118 unsigned long nr_superpages, end_pfn;
2119
2118 pteval |= DMA_PTE_LARGE_PAGE; 2120 pteval |= DMA_PTE_LARGE_PAGE;
2119 lvl_pages = lvl_to_nr_pages(largepage_lvl); 2121 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2122
2123 nr_superpages = sg_res / lvl_pages;
2124 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2125
2120 /* 2126 /*
2121 * Ensure that old small page tables are 2127 * Ensure that old small page tables are
2122 * removed to make room for superpage, 2128 * removed to make room for superpage(s).
2123 * if they exist.
2124 */ 2129 */
2125 dma_pte_free_pagetable(domain, iov_pfn, 2130 dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
2126 iov_pfn + lvl_pages - 1);
2127 } else { 2131 } else {
2128 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; 2132 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2129 } 2133 }
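
The intel-iommu hunk fixes how much of the old small-page table is torn down before superpages are installed: the freed range has to cover every superpage the scatterlist chunk will occupy, not just the first one. The arithmetic in isolation, with made-up page counts:

#include <stdio.h>

int main(void)
{
	unsigned long iov_pfn = 0x1000;	/* first page of the mapping        */
	unsigned long sg_res = 3 * 512;	/* pages left in this sg chunk      */
	unsigned long lvl_pages = 512;	/* pages per 2 MiB superpage        */

	unsigned long nr_superpages = sg_res / lvl_pages;
	unsigned long end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;

	/* the old code freed only up to iov_pfn + lvl_pages - 1, one superpage */
	printf("free page tables for pfn %#lx..%#lx (%lu superpages)\n",
	       iov_pfn, end_pfn, nr_superpages);
	return 0;
}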
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 20cc36b01b77..0a17d1b91a81 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -634,10 +634,10 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
634 634
635 disk_super = dm_block_data(sblock); 635 disk_super = dm_block_data(sblock);
636 636
637 disk_super->flags = cpu_to_le32(cmd->flags);
637 if (mutator) 638 if (mutator)
638 update_flags(disk_super, mutator); 639 update_flags(disk_super, mutator);
639 640
640 disk_super->flags = cpu_to_le32(cmd->flags);
641 disk_super->mapping_root = cpu_to_le64(cmd->root); 641 disk_super->mapping_root = cpu_to_le64(cmd->root);
642 disk_super->hint_root = cpu_to_le64(cmd->hint_root); 642 disk_super->hint_root = cpu_to_le64(cmd->hint_root);
643 disk_super->discard_root = cpu_to_le64(cmd->discard_root); 643 disk_super->discard_root = cpu_to_le64(cmd->discard_root);
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 421a36c593e3..2e4c4cb79e4d 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -301,11 +301,16 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
301{ 301{
302 int s; 302 int s;
303 uint32_t max_entries = le32_to_cpu(left->header.max_entries); 303 uint32_t max_entries = le32_to_cpu(left->header.max_entries);
304 unsigned target = (nr_left + nr_center + nr_right) / 3; 304 unsigned total = nr_left + nr_center + nr_right;
305 BUG_ON(target > max_entries); 305 unsigned target_right = total / 3;
306 unsigned remainder = (target_right * 3) != total;
307 unsigned target_left = target_right + remainder;
308
309 BUG_ON(target_left > max_entries);
310 BUG_ON(target_right > max_entries);
306 311
307 if (nr_left < nr_right) { 312 if (nr_left < nr_right) {
308 s = nr_left - target; 313 s = nr_left - target_left;
309 314
310 if (s < 0 && nr_center < -s) { 315 if (s < 0 && nr_center < -s) {
311 /* not enough in central node */ 316 /* not enough in central node */
@@ -316,10 +321,10 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
316 } else 321 } else
317 shift(left, center, s); 322 shift(left, center, s);
318 323
319 shift(center, right, target - nr_right); 324 shift(center, right, target_right - nr_right);
320 325
321 } else { 326 } else {
322 s = target - nr_right; 327 s = target_right - nr_right;
323 if (s > 0 && nr_center < s) { 328 if (s > 0 && nr_center < s) {
324 /* not enough in central node */ 329 /* not enough in central node */
325 shift(center, right, nr_center); 330 shift(center, right, nr_center);
@@ -329,7 +334,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
329 } else 334 } else
330 shift(center, right, s); 335 shift(center, right, s);
331 336
332 shift(left, center, nr_left - target); 337 shift(left, center, nr_left - target_left);
333 } 338 }
334 339
335 *key_ptr(parent, c->index) = center->keys[0]; 340 *key_ptr(parent, c->index) = center->keys[0];
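
redistribute3() now computes separate left and right targets, handing the remainder of the division by three to the left sibling so no entry is lost to truncation. The arithmetic on its own, as a quick check:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned total;

	for (total = 0; total < 20; total++) {
		unsigned target_right = total / 3;
		unsigned remainder = (target_right * 3) != total;
		unsigned target_left = target_right + remainder;
		unsigned target_center = total - target_left - target_right;

		/* every entry is accounted for across the three nodes */
		assert(target_left + target_center + target_right == total);
		printf("total %2u -> left %2u center %2u right %2u\n",
		       total, target_left, target_center, target_right);
	}
	return 0;
}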
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index b6cec258cc21..0e09aef43998 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -523,7 +523,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
523 523
524 r = new_block(s->info, &right); 524 r = new_block(s->info, &right);
525 if (r < 0) { 525 if (r < 0) {
526 /* FIXME: put left */ 526 unlock_block(s->info, left);
527 return r; 527 return r;
528 } 528 }
529 529
diff --git a/drivers/media/dvb-frontends/horus3a.h b/drivers/media/dvb-frontends/horus3a.h
index b055319d532e..c1e2d1834b78 100644
--- a/drivers/media/dvb-frontends/horus3a.h
+++ b/drivers/media/dvb-frontends/horus3a.h
@@ -46,8 +46,8 @@ extern struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
46 const struct horus3a_config *config, 46 const struct horus3a_config *config,
47 struct i2c_adapter *i2c); 47 struct i2c_adapter *i2c);
48#else 48#else
49static inline struct dvb_frontend *horus3a_attach( 49static inline struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
50 const struct cxd2820r_config *config, 50 const struct horus3a_config *config,
51 struct i2c_adapter *i2c) 51 struct i2c_adapter *i2c)
52{ 52{
53 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 53 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/dvb-frontends/lnbh25.h b/drivers/media/dvb-frontends/lnbh25.h
index 69f30e21f6b3..1f329ef05acc 100644
--- a/drivers/media/dvb-frontends/lnbh25.h
+++ b/drivers/media/dvb-frontends/lnbh25.h
@@ -43,7 +43,7 @@ struct dvb_frontend *lnbh25_attach(
43 struct lnbh25_config *cfg, 43 struct lnbh25_config *cfg,
44 struct i2c_adapter *i2c); 44 struct i2c_adapter *i2c);
45#else 45#else
46static inline dvb_frontend *lnbh25_attach( 46static inline struct dvb_frontend *lnbh25_attach(
47 struct dvb_frontend *fe, 47 struct dvb_frontend *fe,
48 struct lnbh25_config *cfg, 48 struct lnbh25_config *cfg,
49 struct i2c_adapter *i2c) 49 struct i2c_adapter *i2c)
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index ff31e7a01ca9..feeeb70d841e 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -18,6 +18,27 @@
18 18
19static struct dvb_frontend_ops m88ds3103_ops; 19static struct dvb_frontend_ops m88ds3103_ops;
20 20
21/* write single register with mask */
22static int m88ds3103_update_bits(struct m88ds3103_dev *dev,
23 u8 reg, u8 mask, u8 val)
24{
25 int ret;
26 u8 tmp;
27
28 /* no need for read if whole reg is written */
29 if (mask != 0xff) {
30 ret = regmap_bulk_read(dev->regmap, reg, &tmp, 1);
31 if (ret)
32 return ret;
33
34 val &= mask;
35 tmp &= ~mask;
36 val |= tmp;
37 }
38
39 return regmap_bulk_write(dev->regmap, reg, &val, 1);
40}
41
21/* write reg val table using reg addr auto increment */ 42/* write reg val table using reg addr auto increment */
22static int m88ds3103_wr_reg_val_tab(struct m88ds3103_dev *dev, 43static int m88ds3103_wr_reg_val_tab(struct m88ds3103_dev *dev,
23 const struct m88ds3103_reg_val *tab, int tab_len) 44 const struct m88ds3103_reg_val *tab, int tab_len)
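
The new m88ds3103_update_bits() helper above reimplements a masked register update on top of the regmap bulk accessors, skipping the read entirely when the mask covers the whole register. The mask algebra is the usual read-modify-write; a standalone sketch with illustrative values (plain C, not the driver code):

        #include <stdint.h>
        #include <stdio.h>

        /* Keep the bits outside 'mask', replace the bits inside it. */
        static uint8_t update_bits(uint8_t old, uint8_t mask, uint8_t val)
        {
                return (uint8_t)((old & ~mask) | (val & mask));
        }

        int main(void)
        {
                /* e.g. reg 0x22: program bits 7:6 while preserving bits 5:0 */
                uint8_t reg = 0x5a;
                uint8_t out = update_bits(reg, 0xc0, 2 << 6);

                printf("0x%02x -> 0x%02x\n", reg, out);  /* 0x5a -> 0x9a */
                return 0;
        }

The driver's version performs the same combination in place (val &= mask; tmp &= ~mask; val |= tmp) before a single regmap_bulk_write().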
@@ -394,10 +415,10 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
394 u8tmp2 = 0x00; /* 0b00 */ 415 u8tmp2 = 0x00; /* 0b00 */
395 break; 416 break;
396 } 417 }
397 ret = regmap_update_bits(dev->regmap, 0x22, 0xc0, u8tmp1 << 6); 418 ret = m88ds3103_update_bits(dev, 0x22, 0xc0, u8tmp1 << 6);
398 if (ret) 419 if (ret)
399 goto err; 420 goto err;
400 ret = regmap_update_bits(dev->regmap, 0x24, 0xc0, u8tmp2 << 6); 421 ret = m88ds3103_update_bits(dev, 0x24, 0xc0, u8tmp2 << 6);
401 if (ret) 422 if (ret)
402 goto err; 423 goto err;
403 } 424 }
@@ -455,13 +476,13 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
455 if (ret) 476 if (ret)
456 goto err; 477 goto err;
457 } 478 }
458 ret = regmap_update_bits(dev->regmap, 0x9d, 0x08, 0x08); 479 ret = m88ds3103_update_bits(dev, 0x9d, 0x08, 0x08);
459 if (ret) 480 if (ret)
460 goto err; 481 goto err;
461 ret = regmap_write(dev->regmap, 0xf1, 0x01); 482 ret = regmap_write(dev->regmap, 0xf1, 0x01);
462 if (ret) 483 if (ret)
463 goto err; 484 goto err;
464 ret = regmap_update_bits(dev->regmap, 0x30, 0x80, 0x80); 485 ret = m88ds3103_update_bits(dev, 0x30, 0x80, 0x80);
465 if (ret) 486 if (ret)
466 goto err; 487 goto err;
467 } 488 }
@@ -498,7 +519,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
498 switch (dev->cfg->ts_mode) { 519 switch (dev->cfg->ts_mode) {
499 case M88DS3103_TS_SERIAL: 520 case M88DS3103_TS_SERIAL:
500 case M88DS3103_TS_SERIAL_D7: 521 case M88DS3103_TS_SERIAL_D7:
501 ret = regmap_update_bits(dev->regmap, 0x29, 0x20, u8tmp1); 522 ret = m88ds3103_update_bits(dev, 0x29, 0x20, u8tmp1);
502 if (ret) 523 if (ret)
503 goto err; 524 goto err;
504 u8tmp1 = 0; 525 u8tmp1 = 0;
@@ -567,11 +588,11 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
567 if (ret) 588 if (ret)
568 goto err; 589 goto err;
569 590
570 ret = regmap_update_bits(dev->regmap, 0x4d, 0x02, dev->cfg->spec_inv << 1); 591 ret = m88ds3103_update_bits(dev, 0x4d, 0x02, dev->cfg->spec_inv << 1);
571 if (ret) 592 if (ret)
572 goto err; 593 goto err;
573 594
574 ret = regmap_update_bits(dev->regmap, 0x30, 0x10, dev->cfg->agc_inv << 4); 595 ret = m88ds3103_update_bits(dev, 0x30, 0x10, dev->cfg->agc_inv << 4);
575 if (ret) 596 if (ret)
576 goto err; 597 goto err;
577 598
@@ -625,13 +646,13 @@ static int m88ds3103_init(struct dvb_frontend *fe)
625 dev->warm = false; 646 dev->warm = false;
626 647
627 /* wake up device from sleep */ 648 /* wake up device from sleep */
628 ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x01); 649 ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x01);
629 if (ret) 650 if (ret)
630 goto err; 651 goto err;
631 ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x00); 652 ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x00);
632 if (ret) 653 if (ret)
633 goto err; 654 goto err;
634 ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x00); 655 ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x00);
635 if (ret) 656 if (ret)
636 goto err; 657 goto err;
637 658
@@ -749,18 +770,18 @@ static int m88ds3103_sleep(struct dvb_frontend *fe)
749 utmp = 0x29; 770 utmp = 0x29;
750 else 771 else
751 utmp = 0x27; 772 utmp = 0x27;
752 ret = regmap_update_bits(dev->regmap, utmp, 0x01, 0x00); 773 ret = m88ds3103_update_bits(dev, utmp, 0x01, 0x00);
753 if (ret) 774 if (ret)
754 goto err; 775 goto err;
755 776
756 /* sleep */ 777 /* sleep */
757 ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x00); 778 ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x00);
758 if (ret) 779 if (ret)
759 goto err; 780 goto err;
760 ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x01); 781 ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x01);
761 if (ret) 782 if (ret)
762 goto err; 783 goto err;
763 ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x10); 784 ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x10);
764 if (ret) 785 if (ret)
765 goto err; 786 goto err;
766 787
@@ -992,12 +1013,12 @@ static int m88ds3103_set_tone(struct dvb_frontend *fe,
992 } 1013 }
993 1014
994 utmp = tone << 7 | dev->cfg->envelope_mode << 5; 1015 utmp = tone << 7 | dev->cfg->envelope_mode << 5;
995 ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); 1016 ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp);
996 if (ret) 1017 if (ret)
997 goto err; 1018 goto err;
998 1019
999 utmp = 1 << 2; 1020 utmp = 1 << 2;
1000 ret = regmap_update_bits(dev->regmap, 0xa1, reg_a1_mask, utmp); 1021 ret = m88ds3103_update_bits(dev, 0xa1, reg_a1_mask, utmp);
1001 if (ret) 1022 if (ret)
1002 goto err; 1023 goto err;
1003 1024
@@ -1047,7 +1068,7 @@ static int m88ds3103_set_voltage(struct dvb_frontend *fe,
1047 voltage_dis ^= dev->cfg->lnb_en_pol; 1068 voltage_dis ^= dev->cfg->lnb_en_pol;
1048 1069
1049 utmp = voltage_dis << 1 | voltage_sel << 0; 1070 utmp = voltage_dis << 1 | voltage_sel << 0;
1050 ret = regmap_update_bits(dev->regmap, 0xa2, 0x03, utmp); 1071 ret = m88ds3103_update_bits(dev, 0xa2, 0x03, utmp);
1051 if (ret) 1072 if (ret)
1052 goto err; 1073 goto err;
1053 1074
@@ -1080,7 +1101,7 @@ static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe,
1080 } 1101 }
1081 1102
1082 utmp = dev->cfg->envelope_mode << 5; 1103 utmp = dev->cfg->envelope_mode << 5;
1083 ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); 1104 ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp);
1084 if (ret) 1105 if (ret)
1085 goto err; 1106 goto err;
1086 1107
@@ -1115,12 +1136,12 @@ static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe,
1115 } else { 1136 } else {
1116 dev_dbg(&client->dev, "diseqc tx timeout\n"); 1137 dev_dbg(&client->dev, "diseqc tx timeout\n");
1117 1138
1118 ret = regmap_update_bits(dev->regmap, 0xa1, 0xc0, 0x40); 1139 ret = m88ds3103_update_bits(dev, 0xa1, 0xc0, 0x40);
1119 if (ret) 1140 if (ret)
1120 goto err; 1141 goto err;
1121 } 1142 }
1122 1143
1123 ret = regmap_update_bits(dev->regmap, 0xa2, 0xc0, 0x80); 1144 ret = m88ds3103_update_bits(dev, 0xa2, 0xc0, 0x80);
1124 if (ret) 1145 if (ret)
1125 goto err; 1146 goto err;
1126 1147
@@ -1152,7 +1173,7 @@ static int m88ds3103_diseqc_send_burst(struct dvb_frontend *fe,
1152 } 1173 }
1153 1174
1154 utmp = dev->cfg->envelope_mode << 5; 1175 utmp = dev->cfg->envelope_mode << 5;
1155 ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); 1176 ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp);
1156 if (ret) 1177 if (ret)
1157 goto err; 1178 goto err;
1158 1179
@@ -1194,12 +1215,12 @@ static int m88ds3103_diseqc_send_burst(struct dvb_frontend *fe,
1194 } else { 1215 } else {
1195 dev_dbg(&client->dev, "diseqc tx timeout\n"); 1216 dev_dbg(&client->dev, "diseqc tx timeout\n");
1196 1217
1197 ret = regmap_update_bits(dev->regmap, 0xa1, 0xc0, 0x40); 1218 ret = m88ds3103_update_bits(dev, 0xa1, 0xc0, 0x40);
1198 if (ret) 1219 if (ret)
1199 goto err; 1220 goto err;
1200 } 1221 }
1201 1222
1202 ret = regmap_update_bits(dev->regmap, 0xa2, 0xc0, 0x80); 1223 ret = m88ds3103_update_bits(dev, 0xa2, 0xc0, 0x80);
1203 if (ret) 1224 if (ret)
1204 goto err; 1225 goto err;
1205 1226
@@ -1435,13 +1456,13 @@ static int m88ds3103_probe(struct i2c_client *client,
1435 goto err_kfree; 1456 goto err_kfree;
1436 1457
1437 /* sleep */ 1458 /* sleep */
1438 ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x00); 1459 ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x00);
1439 if (ret) 1460 if (ret)
1440 goto err_kfree; 1461 goto err_kfree;
1441 ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x01); 1462 ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x01);
1442 if (ret) 1463 if (ret)
1443 goto err_kfree; 1464 goto err_kfree;
1444 ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x10); 1465 ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x10);
1445 if (ret) 1466 if (ret)
1446 goto err_kfree; 1467 goto err_kfree;
1447 1468
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 81788c5a44d8..821a8f481507 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -502,6 +502,10 @@ static int si2168_init(struct dvb_frontend *fe)
502 /* firmware is in the new format */ 502 /* firmware is in the new format */
503 for (remaining = fw->size; remaining > 0; remaining -= 17) { 503 for (remaining = fw->size; remaining > 0; remaining -= 17) {
504 len = fw->data[fw->size - remaining]; 504 len = fw->data[fw->size - remaining];
505 if (len > SI2168_ARGLEN) {
506 ret = -EINVAL;
507 break;
508 }
505 memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len); 509 memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
506 cmd.wlen = len; 510 cmd.wlen = len;
507 cmd.rlen = 1; 511 cmd.rlen = 1;
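
Both this si2168 hunk and the matching si2157 change further down treat the firmware as a sequence of fixed 17-byte records: one length byte followed by the command payload, and the new check rejects a length byte larger than the command buffer before the memcpy(). A hedged userspace sketch of that walk (ARGLEN here is a stand-in value; the real SI2168_ARGLEN is defined in the driver's header, not in this hunk):

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define ARGLEN 30       /* stand-in for SI2168_ARGLEN */

        /* Walk 17-byte records: [len][payload...], rejecting oversized lengths. */
        static int parse_fw(const uint8_t *data, size_t size)
        {
                uint8_t cmd_args[ARGLEN];
                size_t remaining;

                if (size % 17)
                        return -1;      /* firmware must be whole records */

                for (remaining = size; remaining > 0; remaining -= 17) {
                        uint8_t len = data[size - remaining];

                        if (len > ARGLEN || len > 16)
                                return -1;  /* would overflow cmd_args / record */

                        memcpy(cmd_args, &data[(size - remaining) + 1], len);
                        printf("record with %u payload bytes\n", len);
                }
                return 0;
        }

        int main(void)
        {
                uint8_t fw[17] = { 4, 0xc8, 0x05, 0x01, 0x00 }; /* one record */

                return parse_fw(fw, sizeof(fw)) ? 1 : 0;
        }

The drivers do the same bound check against SI2168_ARGLEN / SI2157_ARGLEN and bail out with -EINVAL (si2168) or jump to the firmware-release path (si2157).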
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
index f55b3276f28d..56773f3893d4 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
@@ -80,11 +80,9 @@ irqreturn_t netup_spi_interrupt(struct netup_spi *spi)
80 u16 reg; 80 u16 reg;
81 unsigned long flags; 81 unsigned long flags;
82 82
83 if (!spi) { 83 if (!spi)
84 dev_dbg(&spi->master->dev,
85 "%s(): SPI not initialized\n", __func__);
86 return IRQ_NONE; 84 return IRQ_NONE;
87 } 85
88 spin_lock_irqsave(&spi->lock, flags); 86 spin_lock_irqsave(&spi->lock, flags);
89 reg = readw(&spi->regs->control_stat); 87 reg = readw(&spi->regs->control_stat);
90 if (!(reg & NETUP_SPI_CTRL_IRQ)) { 88 if (!(reg & NETUP_SPI_CTRL_IRQ)) {
@@ -234,11 +232,9 @@ void netup_spi_release(struct netup_unidvb_dev *ndev)
234 unsigned long flags; 232 unsigned long flags;
235 struct netup_spi *spi = ndev->spi; 233 struct netup_spi *spi = ndev->spi;
236 234
237 if (!spi) { 235 if (!spi)
238 dev_dbg(&spi->master->dev,
239 "%s(): SPI not initialized\n", __func__);
240 return; 236 return;
241 } 237
242 spin_lock_irqsave(&spi->lock, flags); 238 spin_lock_irqsave(&spi->lock, flags);
243 reg = readw(&spi->regs->control_stat); 239 reg = readw(&spi->regs->control_stat);
244 writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat); 240 writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat);
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 486aef50d99b..f922f2e827bc 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -1097,7 +1097,7 @@ static int load_slim_core_fw(const struct firmware *fw, void *context)
1097 Elf32_Ehdr *ehdr; 1097 Elf32_Ehdr *ehdr;
1098 Elf32_Phdr *phdr; 1098 Elf32_Phdr *phdr;
1099 u8 __iomem *dst; 1099 u8 __iomem *dst;
1100 int err, i; 1100 int err = 0, i;
1101 1101
1102 if (!fw || !context) 1102 if (!fw || !context)
1103 return -EINVAL; 1103 return -EINVAL;
@@ -1106,7 +1106,7 @@ static int load_slim_core_fw(const struct firmware *fw, void *context)
1106 phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff); 1106 phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);
1107 1107
1108 /* go through the available ELF segments */ 1108 /* go through the available ELF segments */
1109 for (i = 0; i < ehdr->e_phnum && !err; i++, phdr++) { 1109 for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1110 1110
1111 /* Only consider LOAD segments */ 1111 /* Only consider LOAD segments */
1112 if (phdr->p_type != PT_LOAD) 1112 if (phdr->p_type != PT_LOAD)
@@ -1192,7 +1192,6 @@ err:
1192 1192
1193static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei) 1193static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei)
1194{ 1194{
1195 int ret;
1196 int err; 1195 int err;
1197 1196
1198 dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA); 1197 dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
@@ -1207,7 +1206,7 @@ static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei)
1207 if (err) { 1206 if (err) {
1208 dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err); 1207 dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err);
1209 complete_all(&fei->fw_ack); 1208 complete_all(&fei->fw_ack);
1210 return ret; 1209 return err;
1211 } 1210 }
1212 1211
1213 return 0; 1212 return 0;
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index 1c087cb76815..d0549fba711c 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -257,7 +257,7 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
257 goto clkerr; 257 goto clkerr;
258 258
259 if (devm_request_irq(dev, priv->irq, hix5hd2_ir_rx_interrupt, 259 if (devm_request_irq(dev, priv->irq, hix5hd2_ir_rx_interrupt,
260 IRQF_NO_SUSPEND, pdev->name, priv) < 0) { 260 0, pdev->name, priv) < 0) {
261 dev_err(dev, "IRQ %d register failed\n", priv->irq); 261 dev_err(dev, "IRQ %d register failed\n", priv->irq);
262 ret = -EINVAL; 262 ret = -EINVAL;
263 goto regerr; 263 goto regerr;
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 507382160e5e..ce157edd45fa 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -166,6 +166,10 @@ static int si2157_init(struct dvb_frontend *fe)
166 166
167 for (remaining = fw->size; remaining > 0; remaining -= 17) { 167 for (remaining = fw->size; remaining > 0; remaining -= 17) {
168 len = fw->data[fw->size - remaining]; 168 len = fw->data[fw->size - remaining];
169 if (len > SI2157_ARGLEN) {
170 dev_err(&client->dev, "Bad firmware length\n");
171 goto err_release_firmware;
172 }
169 memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len); 173 memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
170 cmd.wlen = len; 174 cmd.wlen = len;
171 cmd.rlen = 1; 175 cmd.rlen = 1;
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index c3cac4c12fb3..197a4f2e54d2 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -34,6 +34,14 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
34 unsigned int pipe; 34 unsigned int pipe;
35 u8 requesttype; 35 u8 requesttype;
36 36
37 mutex_lock(&d->usb_mutex);
38
39 if (req->size > sizeof(dev->buf)) {
40 dev_err(&d->intf->dev, "too large message %u\n", req->size);
41 ret = -EINVAL;
42 goto err_mutex_unlock;
43 }
44
37 if (req->index & CMD_WR_FLAG) { 45 if (req->index & CMD_WR_FLAG) {
38 /* write */ 46 /* write */
39 memcpy(dev->buf, req->data, req->size); 47 memcpy(dev->buf, req->data, req->size);
@@ -50,14 +58,17 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
50 dvb_usb_dbg_usb_control_msg(d->udev, 0, requesttype, req->value, 58 dvb_usb_dbg_usb_control_msg(d->udev, 0, requesttype, req->value,
51 req->index, dev->buf, req->size); 59 req->index, dev->buf, req->size);
52 if (ret < 0) 60 if (ret < 0)
53 goto err; 61 goto err_mutex_unlock;
54 62
55 /* read request, copy returned data to return buf */ 63 /* read request, copy returned data to return buf */
56 if (requesttype == (USB_TYPE_VENDOR | USB_DIR_IN)) 64 if (requesttype == (USB_TYPE_VENDOR | USB_DIR_IN))
57 memcpy(req->data, dev->buf, req->size); 65 memcpy(req->data, dev->buf, req->size);
58 66
67 mutex_unlock(&d->usb_mutex);
68
59 return 0; 69 return 0;
60err: 70err_mutex_unlock:
71 mutex_unlock(&d->usb_mutex);
61 dev_dbg(&d->intf->dev, "failed=%d\n", ret); 72 dev_dbg(&d->intf->dev, "failed=%d\n", ret);
62 return ret; 73 return ret;
63} 74}
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
index 9f6115a2ee01..138062960a73 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
@@ -71,7 +71,7 @@
71 71
72 72
73struct rtl28xxu_dev { 73struct rtl28xxu_dev {
74 u8 buf[28]; 74 u8 buf[128];
75 u8 chip_id; 75 u8 chip_id;
76 u8 tuner; 76 u8 tuner;
77 char *tuner_name; 77 char *tuner_name;
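
The rtl28xxu changes above combine three things: the whole control transfer is now serialized on d->usb_mutex, req->size is checked against the shared bounce buffer before any copy, and that buffer grows from 28 to 128 bytes. The pattern being enforced is one bounded, mutex-guarded bounce buffer per device; a rough userspace sketch, with a pthread mutex standing in for the USB mutex and purely illustrative names:

        #include <errno.h>
        #include <pthread.h>
        #include <stdint.h>
        #include <string.h>

        struct ctrl_dev {
                pthread_mutex_t lock;   /* stands in for d->usb_mutex */
                uint8_t buf[128];       /* shared bounce buffer */
        };

        static int ctrl_msg(struct ctrl_dev *dev, const uint8_t *data, size_t size,
                            uint8_t *out)
        {
                int ret = 0;

                pthread_mutex_lock(&dev->lock);

                if (size > sizeof(dev->buf)) {
                        ret = -EINVAL;          /* reject oversized requests early */
                        goto unlock;
                }

                memcpy(dev->buf, data, size);   /* write half of the transfer */
                /* ... the real driver submits dev->buf over USB here ... */
                if (out)
                        memcpy(out, dev->buf, size);    /* read half copies back */

        unlock:
                pthread_mutex_unlock(&dev->lock);
                return ret;
        }

        int main(void)
        {
                struct ctrl_dev dev = { .lock = PTHREAD_MUTEX_INITIALIZER };
                uint8_t req[4] = { 0xde, 0xad, 0xbe, 0xef }, resp[4];

                return ctrl_msg(&dev, req, sizeof(req), resp);
        }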
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 82876a67f144..9beece00869b 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -47,7 +47,7 @@ config V4L2_MEM2MEM_DEV
47# Used by LED subsystem flash drivers 47# Used by LED subsystem flash drivers
48config V4L2_FLASH_LED_CLASS 48config V4L2_FLASH_LED_CLASS
49 tristate "V4L2 flash API for LED flash class devices" 49 tristate "V4L2 flash API for LED flash class devices"
50 depends on VIDEO_V4L2_SUBDEV_API 50 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
51 depends on LEDS_CLASS_FLASH 51 depends on LEDS_CLASS_FLASH
52 ---help--- 52 ---help---
53 Say Y here to enable V4L2 flash API support for LED flash 53 Say Y here to enable V4L2 flash API support for LED flash
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index c6a644b22af4..6f3154613dc7 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -58,12 +58,18 @@ config OMAP_GPMC
58 memory drives like NOR, NAND, OneNAND, SRAM. 58 memory drives like NOR, NAND, OneNAND, SRAM.
59 59
60config OMAP_GPMC_DEBUG 60config OMAP_GPMC_DEBUG
61 bool 61 bool "Enable GPMC debug output and skip reset of GPMC during init"
62 depends on OMAP_GPMC 62 depends on OMAP_GPMC
63 help 63 help
64 Enables verbose debugging mostly to decode the bootloader provided 64 Enables verbose debugging mostly to decode the bootloader provided
65 timings. Enable this during development to configure devices 65 timings. To preserve the bootloader provided timings, the reset
66 connected to the GPMC bus. 66 of GPMC is skipped during init. Enable this during development to
67 configure devices connected to the GPMC bus.
68
69 NOTE: In addition to matching the register setup with the bootloader,
70 you also need to match the GPMC FCLK frequency used by the
71 bootloader, or else the GPMC timings won't be identical to the
72 bootloader timings.
67 73
68config MVEBU_DEVBUS 74config MVEBU_DEVBUS
69 bool "Marvell EBU Device Bus Controller" 75 bool "Marvell EBU Device Bus Controller"
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 32ac049f2bc4..6515dfc2b805 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -696,7 +696,6 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
696 int div; 696 int div;
697 u32 l; 697 u32 l;
698 698
699 gpmc_cs_show_timings(cs, "before gpmc_cs_set_timings");
700 div = gpmc_calc_divider(t->sync_clk); 699 div = gpmc_calc_divider(t->sync_clk);
701 if (div < 0) 700 if (div < 0)
702 return div; 701 return div;
@@ -1988,6 +1987,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
1988 if (ret < 0) 1987 if (ret < 0)
1989 goto err; 1988 goto err;
1990 1989
1990 gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings");
1991 ret = gpmc_cs_program_settings(cs, &gpmc_s); 1991 ret = gpmc_cs_program_settings(cs, &gpmc_s);
1992 if (ret < 0) 1992 if (ret < 0)
1993 goto err; 1993 goto err;
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index b78cf5d403a3..7fc9174d4619 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2263,15 +2263,12 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2263/* 2263/*
2264 * eMMC hardware reset. 2264 * eMMC hardware reset.
2265 */ 2265 */
2266static int mmc_test_hw_reset(struct mmc_test_card *test) 2266static int mmc_test_reset(struct mmc_test_card *test)
2267{ 2267{
2268 struct mmc_card *card = test->card; 2268 struct mmc_card *card = test->card;
2269 struct mmc_host *host = card->host; 2269 struct mmc_host *host = card->host;
2270 int err; 2270 int err;
2271 2271
2272 if (!mmc_card_mmc(card) || !mmc_can_reset(card))
2273 return RESULT_UNSUP_CARD;
2274
2275 err = mmc_hw_reset(host); 2272 err = mmc_hw_reset(host);
2276 if (!err) 2273 if (!err)
2277 return RESULT_OK; 2274 return RESULT_OK;
@@ -2605,8 +2602,8 @@ static const struct mmc_test_case mmc_test_cases[] = {
2605 }, 2602 },
2606 2603
2607 { 2604 {
2608 .name = "eMMC hardware reset", 2605 .name = "Reset test",
2609 .run = mmc_test_hw_reset, 2606 .run = mmc_test_reset,
2610 }, 2607 },
2611}; 2608};
2612 2609
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index e726903170a8..f6cd995dbe92 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1924,7 +1924,6 @@ EXPORT_SYMBOL(mmc_can_reset);
1924static int mmc_reset(struct mmc_host *host) 1924static int mmc_reset(struct mmc_host *host)
1925{ 1925{
1926 struct mmc_card *card = host->card; 1926 struct mmc_card *card = host->card;
1927 u32 status;
1928 1927
1929 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) 1928 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
1930 return -EOPNOTSUPP; 1929 return -EOPNOTSUPP;
@@ -1937,12 +1936,6 @@ static int mmc_reset(struct mmc_host *host)
1937 1936
1938 host->ops->hw_reset(host); 1937 host->ops->hw_reset(host);
1939 1938
1940 /* If the reset has happened, then a status command will fail */
1941 if (!mmc_send_status(card, &status)) {
1942 mmc_host_clk_release(host);
1943 return -ENOSYS;
1944 }
1945
1946 /* Set initial state and call mmc_set_ios */ 1939 /* Set initial state and call mmc_set_ios */
1947 mmc_set_initial_state(host); 1940 mmc_set_initial_state(host);
1948 mmc_host_clk_release(host); 1941 mmc_host_clk_release(host);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index e5fac368068a..131026fbc2d7 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -87,6 +87,7 @@ static const struct pci_device_id peak_pci_tbl[] = {
87 {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 87 {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
88 {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 88 {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
89 {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 89 {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
90 {PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
90#ifdef CONFIG_CAN_PEAK_PCIEC 91#ifdef CONFIG_CAN_PEAK_PCIEC
91 {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 92 {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
92 {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 93 {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 2c063b60db4b..96f485ab612e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -327,9 +327,13 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
327 pdata->debugfs_xpcs_reg = 0; 327 pdata->debugfs_xpcs_reg = 0;
328 328
329 buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name); 329 buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
330 if (!buf)
331 return;
332
330 pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL); 333 pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
331 if (!pdata->xgbe_debugfs) { 334 if (!pdata->xgbe_debugfs) {
332 netdev_err(pdata->netdev, "debugfs_create_dir failed\n"); 335 netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
336 kfree(buf);
333 return; 337 return;
334 } 338 }
335 339
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index aeb7ce64452e..be628bd9fb18 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3351,6 +3351,13 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
3351 udp_rss_requested = 0; 3351 udp_rss_requested = 0;
3352 else 3352 else
3353 return -EINVAL; 3353 return -EINVAL;
3354
3355 if (CHIP_IS_E1x(bp) && udp_rss_requested) {
3356 DP(BNX2X_MSG_ETHTOOL,
3357 "57710, 57711 boards don't support RSS according to UDP 4-tuple\n");
3358 return -EINVAL;
3359 }
3360
3354 if ((info->flow_type == UDP_V4_FLOW) && 3361 if ((info->flow_type == UDP_V4_FLOW) &&
3355 (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) { 3362 (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
3356 bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested; 3363 bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 3bc701e4c59e..1805541b4240 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1683,6 +1683,24 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
1683 bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); 1683 bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1684} 1684}
1685 1685
1686static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
1687{
1688 u32 int0_enable = 0;
1689
1690 /* Monitor cable plug/unplug events for internal PHY, external PHY
1691 * and MoCA PHY
1692 */
1693 if (priv->internal_phy) {
1694 int0_enable |= UMAC_IRQ_LINK_EVENT;
1695 } else if (priv->ext_phy) {
1696 int0_enable |= UMAC_IRQ_LINK_EVENT;
1697 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1698 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
1699 int0_enable |= UMAC_IRQ_LINK_EVENT;
1700 }
1701 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
1702}
1703
1686static int init_umac(struct bcmgenet_priv *priv) 1704static int init_umac(struct bcmgenet_priv *priv)
1687{ 1705{
1688 struct device *kdev = &priv->pdev->dev; 1706 struct device *kdev = &priv->pdev->dev;
@@ -1723,15 +1741,8 @@ static int init_umac(struct bcmgenet_priv *priv)
1723 /* Enable Tx default queue 16 interrupts */ 1741 /* Enable Tx default queue 16 interrupts */
1724 int0_enable |= UMAC_IRQ_TXDMA_DONE; 1742 int0_enable |= UMAC_IRQ_TXDMA_DONE;
1725 1743
1726 /* Monitor cable plug/unplugged event for internal PHY */ 1744 /* Configure backpressure vectors for MoCA */
1727 if (priv->internal_phy) { 1745 if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1728 int0_enable |= UMAC_IRQ_LINK_EVENT;
1729 } else if (priv->ext_phy) {
1730 int0_enable |= UMAC_IRQ_LINK_EVENT;
1731 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1732 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
1733 int0_enable |= UMAC_IRQ_LINK_EVENT;
1734
1735 reg = bcmgenet_bp_mc_get(priv); 1746 reg = bcmgenet_bp_mc_get(priv);
1736 reg |= BIT(priv->hw_params->bp_in_en_shift); 1747 reg |= BIT(priv->hw_params->bp_in_en_shift);
1737 1748
@@ -2645,6 +2656,9 @@ static void bcmgenet_netif_start(struct net_device *dev)
2645 2656
2646 netif_tx_start_all_queues(dev); 2657 netif_tx_start_all_queues(dev);
2647 2658
2659 /* Monitor link interrupts now */
2660 bcmgenet_link_intr_enable(priv);
2661
2648 phy_start(priv->phydev); 2662 phy_start(priv->phydev);
2649} 2663}
2650 2664
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 821540913343..d463563e1f70 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -592,6 +592,7 @@ struct be_adapter {
592 int be_get_temp_freq; 592 int be_get_temp_freq;
593 struct be_hwmon hwmon_info; 593 struct be_hwmon hwmon_info;
594 u8 pf_number; 594 u8 pf_number;
595 u8 pci_func_num;
595 struct rss_info rss_info; 596 struct rss_info rss_info;
596 /* Filters for packets that need to be sent to BMC */ 597 /* Filters for packets that need to be sent to BMC */
597 u32 bmc_filt_mask; 598 u32 bmc_filt_mask;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index eb323913cd39..1795c935ff02 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -851,8 +851,10 @@ static int be_cmd_notify_wait(struct be_adapter *adapter,
851 return status; 851 return status;
852 852
853 dest_wrb = be_cmd_copy(adapter, wrb); 853 dest_wrb = be_cmd_copy(adapter, wrb);
854 if (!dest_wrb) 854 if (!dest_wrb) {
855 return -EBUSY; 855 status = -EBUSY;
856 goto unlock;
857 }
856 858
857 if (use_mcc(adapter)) 859 if (use_mcc(adapter))
858 status = be_mcc_notify_wait(adapter); 860 status = be_mcc_notify_wait(adapter);
@@ -862,6 +864,7 @@ static int be_cmd_notify_wait(struct be_adapter *adapter,
862 if (!status) 864 if (!status)
863 memcpy(wrb, dest_wrb, sizeof(*wrb)); 865 memcpy(wrb, dest_wrb, sizeof(*wrb));
864 866
867unlock:
865 be_cmd_unlock(adapter); 868 be_cmd_unlock(adapter);
866 return status; 869 return status;
867} 870}
@@ -1984,6 +1987,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1984 be_if_cap_flags(adapter)); 1987 be_if_cap_flags(adapter));
1985 } 1988 }
1986 flags &= be_if_cap_flags(adapter); 1989 flags &= be_if_cap_flags(adapter);
1990 if (!flags)
1991 return -ENOTSUPP;
1987 1992
1988 return __be_cmd_rx_filter(adapter, flags, value); 1993 return __be_cmd_rx_filter(adapter, flags, value);
1989} 1994}
@@ -2887,6 +2892,7 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2887 if (!status) { 2892 if (!status) {
2888 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); 2893 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2889 adapter->hba_port_num = attribs->hba_attribs.phy_port; 2894 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2895 adapter->pci_func_num = attribs->pci_func_num;
2890 serial_num = attribs->hba_attribs.controller_serial_number; 2896 serial_num = attribs->hba_attribs.controller_serial_number;
2891 for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++) 2897 for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
2892 adapter->serial_num[i] = le32_to_cpu(serial_num[i]) & 2898 adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
@@ -3709,7 +3715,6 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3709 status = -EINVAL; 3715 status = -EINVAL;
3710 goto err; 3716 goto err;
3711 } 3717 }
3712
3713 adapter->pf_number = desc->pf_num; 3718 adapter->pf_number = desc->pf_num;
3714 be_copy_nic_desc(res, desc); 3719 be_copy_nic_desc(res, desc);
3715 } 3720 }
@@ -3721,7 +3726,10 @@ err:
3721 return status; 3726 return status;
3722} 3727}
3723 3728
3724/* Will use MBOX only if MCCQ has not been created */ 3729/* Will use MBOX only if MCCQ has not been created
3730 * non-zero domain => a PF is querying this on behalf of a VF
3731 * zero domain => a PF or a VF is querying this for itself
3732 */
3725int be_cmd_get_profile_config(struct be_adapter *adapter, 3733int be_cmd_get_profile_config(struct be_adapter *adapter,
3726 struct be_resources *res, u8 query, u8 domain) 3734 struct be_resources *res, u8 query, u8 domain)
3727{ 3735{
@@ -3748,10 +3756,15 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
3748 OPCODE_COMMON_GET_PROFILE_CONFIG, 3756 OPCODE_COMMON_GET_PROFILE_CONFIG,
3749 cmd.size, &wrb, &cmd); 3757 cmd.size, &wrb, &cmd);
3750 3758
3751 req->hdr.domain = domain;
3752 if (!lancer_chip(adapter)) 3759 if (!lancer_chip(adapter))
3753 req->hdr.version = 1; 3760 req->hdr.version = 1;
3754 req->type = ACTIVE_PROFILE_TYPE; 3761 req->type = ACTIVE_PROFILE_TYPE;
3762 /* When a function is querying profile information relating to
3763 * itself, hdr.pf_number must be set to its pci_func_num + 1
3764 */
3765 req->hdr.domain = domain;
3766 if (domain == 0)
3767 req->hdr.pf_num = adapter->pci_func_num + 1;
3755 3768
3756 /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the 3769 /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
3757 * descriptors with all bits set to "1" for the fields which can be 3770 * descriptors with all bits set to "1" for the fields which can be
@@ -3921,12 +3934,16 @@ static void be_fill_vf_res_template(struct be_adapter *adapter,
3921 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS | 3934 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
3922 BE_IF_FLAGS_DEFQ_RSS); 3935 BE_IF_FLAGS_DEFQ_RSS);
3923 } 3936 }
3924
3925 nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
3926 } else { 3937 } else {
3927 num_vf_qs = 1; 3938 num_vf_qs = 1;
3928 } 3939 }
3929 3940
3941 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
3942 nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
3943 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3944 }
3945
3946 nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
3930 nic_vft->rq_count = cpu_to_le16(num_vf_qs); 3947 nic_vft->rq_count = cpu_to_le16(num_vf_qs);
3931 nic_vft->txq_count = cpu_to_le16(num_vf_qs); 3948 nic_vft->txq_count = cpu_to_le16(num_vf_qs);
3932 nic_vft->rssq_count = cpu_to_le16(num_vf_qs); 3949 nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 7d178bdb112e..91155ea74f34 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -289,7 +289,9 @@ struct be_cmd_req_hdr {
289 u32 timeout; /* dword 1 */ 289 u32 timeout; /* dword 1 */
290 u32 request_length; /* dword 2 */ 290 u32 request_length; /* dword 2 */
291 u8 version; /* dword 3 */ 291 u8 version; /* dword 3 */
292 u8 rsvd[3]; /* dword 3 */ 292 u8 rsvd1; /* dword 3 */
293 u8 pf_num; /* dword 3 */
294 u8 rsvd2; /* dword 3 */
293}; 295};
294 296
295#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ 297#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
@@ -1652,7 +1654,11 @@ struct mgmt_hba_attribs {
1652 1654
1653struct mgmt_controller_attrib { 1655struct mgmt_controller_attrib {
1654 struct mgmt_hba_attribs hba_attribs; 1656 struct mgmt_hba_attribs hba_attribs;
1655 u32 rsvd0[10]; 1657 u32 rsvd0[2];
1658 u16 rsvd1;
1659 u8 pci_func_num;
1660 u8 rsvd2;
1661 u32 rsvd3[7];
1656} __packed; 1662} __packed;
1657 1663
1658struct be_cmd_req_cntl_attribs { 1664struct be_cmd_req_cntl_attribs {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 7bf51a1a0a77..eb48a977f8da 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1123,11 +1123,12 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1123 struct sk_buff *skb, 1123 struct sk_buff *skb,
1124 struct be_wrb_params *wrb_params) 1124 struct be_wrb_params *wrb_params)
1125{ 1125{
1126 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or 1126 /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
1127 * less may cause a transmit stall on that port. So the work-around is 1127 * packets that are 32b or less may cause a transmit stall
1128 * to pad short packets (<= 32 bytes) to a 36-byte length. 1128 * on that port. The workaround is to pad such packets
1129 * (len <= 32 bytes) to a minimum length of 36b.
1129 */ 1130 */
1130 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { 1131 if (skb->len <= 32) {
1131 if (skb_put_padto(skb, 36)) 1132 if (skb_put_padto(skb, 36))
1132 return NULL; 1133 return NULL;
1133 } 1134 }
@@ -4205,10 +4206,6 @@ static int be_get_config(struct be_adapter *adapter)
4205 int status, level; 4206 int status, level;
4206 u16 profile_id; 4207 u16 profile_id;
4207 4208
4208 status = be_cmd_get_cntl_attributes(adapter);
4209 if (status)
4210 return status;
4211
4212 status = be_cmd_query_fw_cfg(adapter); 4209 status = be_cmd_query_fw_cfg(adapter);
4213 if (status) 4210 if (status)
4214 return status; 4211 return status;
@@ -4407,6 +4404,11 @@ static int be_setup(struct be_adapter *adapter)
4407 if (!lancer_chip(adapter)) 4404 if (!lancer_chip(adapter))
4408 be_cmd_req_native_mode(adapter); 4405 be_cmd_req_native_mode(adapter);
4409 4406
4407 /* Need to invoke this cmd first to get the PCI Function Number */
4408 status = be_cmd_get_cntl_attributes(adapter);
4409 if (status)
4410 return status;
4411
4410 if (!BE2_chip(adapter) && be_physfn(adapter)) 4412 if (!BE2_chip(adapter) && be_physfn(adapter))
4411 be_alloc_sriov_res(adapter); 4413 be_alloc_sriov_res(adapter);
4412 4414
@@ -4999,7 +5001,15 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4999 return false; 5001 return false;
5000 } 5002 }
5001 5003
5002 return (fhdr->asic_type_rev >= adapter->asic_rev); 5004 /* In BE3 FW images the "asic_type_rev" field doesn't track the
5005 * asic_rev of the chips it is compatible with.
5006 * When asic_type_rev is 0 the image is compatible only with
5007 * pre-BE3-R chips (asic_rev < 0x10)
5008 */
5009 if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
5010 return adapter->asic_rev < 0x10;
5011 else
5012 return (fhdr->asic_type_rev >= adapter->asic_rev);
5003} 5013}
5004 5014
5005static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) 5015static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 3c40f6b99224..55c36230e176 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -198,11 +198,13 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
198 198
199#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) 199#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
200/* 200/*
201 * Return the TBIPA address, starting from the address
202 * of the mapped GFAR MDIO registers (struct gfar)
201 * This is mildly evil, but so is our hardware for doing this. 203 * This is mildly evil, but so is our hardware for doing this.
202 * Also, we have to cast back to struct gfar because of 204 * Also, we have to cast back to struct gfar because of
203 * definition weirdness done in gianfar.h. 205 * definition weirdness done in gianfar.h.
204 */ 206 */
205static uint32_t __iomem *get_gfar_tbipa(void __iomem *p) 207static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p)
206{ 208{
207 struct gfar __iomem *enet_regs = p; 209 struct gfar __iomem *enet_regs = p;
208 210
@@ -210,6 +212,15 @@ static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
210} 212}
211 213
212/* 214/*
215 * Return the TBIPA address, starting from the address
216 * of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar)
217 */
218static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p)
219{
220 return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs));
221}
222
223/*
213 * Return the TBIPAR address for an eTSEC2 node 224 * Return the TBIPAR address for an eTSEC2 node
214 */ 225 */
215static uint32_t __iomem *get_etsec_tbipa(void __iomem *p) 226static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
@@ -220,11 +231,12 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
220 231
221#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) 232#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
222/* 233/*
223 * Return the TBIPAR address for a QE MDIO node 234 * Return the TBIPAR address for a QE MDIO node, starting from the address
235 * of the mapped MII registers (struct fsl_pq_mii)
224 */ 236 */
225static uint32_t __iomem *get_ucc_tbipa(void __iomem *p) 237static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
226{ 238{
227 struct fsl_pq_mdio __iomem *mdio = p; 239 struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii);
228 240
229 return &mdio->utbipar; 241 return &mdio->utbipar;
230} 242}
@@ -300,14 +312,14 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
300 .compatible = "fsl,gianfar-tbi", 312 .compatible = "fsl,gianfar-tbi",
301 .data = &(struct fsl_pq_mdio_data) { 313 .data = &(struct fsl_pq_mdio_data) {
302 .mii_offset = 0, 314 .mii_offset = 0,
303 .get_tbipa = get_gfar_tbipa, 315 .get_tbipa = get_gfar_tbipa_from_mii,
304 }, 316 },
305 }, 317 },
306 { 318 {
307 .compatible = "fsl,gianfar-mdio", 319 .compatible = "fsl,gianfar-mdio",
308 .data = &(struct fsl_pq_mdio_data) { 320 .data = &(struct fsl_pq_mdio_data) {
309 .mii_offset = 0, 321 .mii_offset = 0,
310 .get_tbipa = get_gfar_tbipa, 322 .get_tbipa = get_gfar_tbipa_from_mii,
311 }, 323 },
312 }, 324 },
313 { 325 {
@@ -315,7 +327,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
315 .compatible = "gianfar", 327 .compatible = "gianfar",
316 .data = &(struct fsl_pq_mdio_data) { 328 .data = &(struct fsl_pq_mdio_data) {
317 .mii_offset = offsetof(struct fsl_pq_mdio, mii), 329 .mii_offset = offsetof(struct fsl_pq_mdio, mii),
318 .get_tbipa = get_gfar_tbipa, 330 .get_tbipa = get_gfar_tbipa_from_mdio,
319 }, 331 },
320 }, 332 },
321 { 333 {
@@ -445,6 +457,16 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
445 457
446 tbipa = data->get_tbipa(priv->map); 458 tbipa = data->get_tbipa(priv->map);
447 459
460 /*
461 * Add consistency check to make sure TBI is contained
462 * within the mapped range (not because we would get a
463 * segfault, rather to catch bugs in computing TBI
464 * address). Print error message but continue anyway.
465 */
466 if ((void *)tbipa > priv->map + resource_size(&res) - 4)
467 dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n",
468 ((void *)tbipa - priv->map) + 4);
469
448 iowrite32be(be32_to_cpup(prop), tbipa); 470 iowrite32be(be32_to_cpup(prop), tbipa);
449 } 471 }
450 } 472 }
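
The renamed helpers above now say which mapped block they start from, and get_ucc_tbipa()/get_gfar_tbipa_from_mii() use container_of() to step back from the MII sub-block to the enclosing register map before taking the TBIPA/UTBIPAR member. A small userspace model of that pointer arithmetic (the struct layout below is invented for illustration, not the real gfar/fsl_pq_mdio layout):

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Standard container_of idiom, as used by the helpers above. */
        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct mii_regs  { uint32_t miimcfg, miimcom, miimadd; };
        struct enet_regs {                      /* stand-in for the register map */
                uint32_t ctrl;
                struct mii_regs mii;            /* callers are handed &regs->mii  */
                uint32_t utbipar;               /* ...but need this sibling field */
        };

        static uint32_t *get_tbipa(struct mii_regs *mii)
        {
                /* step back from the MII sub-block to the enclosing register map */
                struct enet_regs *regs = container_of(mii, struct enet_regs, mii);

                return &regs->utbipar;
        }

        int main(void)
        {
                struct enet_regs regs = { .utbipar = 0x1f };

                printf("utbipar = 0x%x\n", *get_tbipa(&regs.mii));
                return 0;
        }

The consistency check added to the probe path then only has to verify that the resulting pointer still lies inside the mapped resource.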
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 62488a67149d..c0e943aecd13 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -386,7 +386,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
386 386
387 hw->aq.asq.next_to_use = 0; 387 hw->aq.asq.next_to_use = 0;
388 hw->aq.asq.next_to_clean = 0; 388 hw->aq.asq.next_to_clean = 0;
389 hw->aq.asq.count = hw->aq.num_asq_entries;
390 389
391 /* allocate the ring memory */ 390 /* allocate the ring memory */
392 ret_code = i40e_alloc_adminq_asq_ring(hw); 391 ret_code = i40e_alloc_adminq_asq_ring(hw);
@@ -404,6 +403,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
404 goto init_adminq_free_rings; 403 goto init_adminq_free_rings;
405 404
406 /* success! */ 405 /* success! */
406 hw->aq.asq.count = hw->aq.num_asq_entries;
407 goto init_adminq_exit; 407 goto init_adminq_exit;
408 408
409init_adminq_free_rings: 409init_adminq_free_rings:
@@ -445,7 +445,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
445 445
446 hw->aq.arq.next_to_use = 0; 446 hw->aq.arq.next_to_use = 0;
447 hw->aq.arq.next_to_clean = 0; 447 hw->aq.arq.next_to_clean = 0;
448 hw->aq.arq.count = hw->aq.num_arq_entries;
449 448
450 /* allocate the ring memory */ 449 /* allocate the ring memory */
451 ret_code = i40e_alloc_adminq_arq_ring(hw); 450 ret_code = i40e_alloc_adminq_arq_ring(hw);
@@ -463,6 +462,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
463 goto init_adminq_free_rings; 462 goto init_adminq_free_rings;
464 463
465 /* success! */ 464 /* success! */
465 hw->aq.arq.count = hw->aq.num_arq_entries;
466 goto init_adminq_exit; 466 goto init_adminq_exit;
467 467
468init_adminq_free_rings: 468init_adminq_free_rings:
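
This i40e change (and the identical i40evf one below) moves the asq/arq count assignment from before the ring allocation to the "success!" label, presumably so that a failed allocation never leaves a non-zero count behind for paths keyed on it. A minimal sketch of the pattern, publishing the size only once the ring actually exists (illustrative names, not the driver's structures):

        #include <errno.h>
        #include <stdlib.h>

        struct ring {
                void *desc;
                unsigned int count;     /* non-zero only when the ring is usable */
        };

        static int ring_init(struct ring *r, unsigned int entries, size_t desc_size)
        {
                r->count = 0;
                r->desc = calloc(entries, desc_size);
                if (!r->desc)
                        return -ENOMEM; /* count stays 0: nothing to tear down */

                r->count = entries;     /* success: the ring is now "live" */
                return 0;
        }

        static void ring_free(struct ring *r)
        {
                if (!r->count)          /* teardown keyed on count */
                        return;
                free(r->desc);
                r->count = 0;
        }

        int main(void)
        {
                struct ring r;

                if (ring_init(&r, 32, 64))
                        return 1;
                ring_free(&r);
                return 0;
        }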
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2fdf978ae6a5..dd44fafd8798 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -8389,6 +8389,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
8389 8389
8390 netdev->hw_enc_features |= NETIF_F_IP_CSUM | 8390 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
8391 NETIF_F_GSO_UDP_TUNNEL | 8391 NETIF_F_GSO_UDP_TUNNEL |
8392 NETIF_F_GSO_GRE |
8392 NETIF_F_TSO; 8393 NETIF_F_TSO;
8393 8394
8394 netdev->features = NETIF_F_SG | 8395 netdev->features = NETIF_F_SG |
@@ -8396,6 +8397,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
8396 NETIF_F_SCTP_CSUM | 8397 NETIF_F_SCTP_CSUM |
8397 NETIF_F_HIGHDMA | 8398 NETIF_F_HIGHDMA |
8398 NETIF_F_GSO_UDP_TUNNEL | 8399 NETIF_F_GSO_UDP_TUNNEL |
8400 NETIF_F_GSO_GRE |
8399 NETIF_F_HW_VLAN_CTAG_TX | 8401 NETIF_F_HW_VLAN_CTAG_TX |
8400 NETIF_F_HW_VLAN_CTAG_RX | 8402 NETIF_F_HW_VLAN_CTAG_RX |
8401 NETIF_F_HW_VLAN_CTAG_FILTER | 8403 NETIF_F_HW_VLAN_CTAG_FILTER |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 929d47152bf2..a23ebfd5cd25 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -373,7 +373,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
373 373
374 hw->aq.asq.next_to_use = 0; 374 hw->aq.asq.next_to_use = 0;
375 hw->aq.asq.next_to_clean = 0; 375 hw->aq.asq.next_to_clean = 0;
376 hw->aq.asq.count = hw->aq.num_asq_entries;
377 376
378 /* allocate the ring memory */ 377 /* allocate the ring memory */
379 ret_code = i40e_alloc_adminq_asq_ring(hw); 378 ret_code = i40e_alloc_adminq_asq_ring(hw);
@@ -391,6 +390,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
391 goto init_adminq_free_rings; 390 goto init_adminq_free_rings;
392 391
393 /* success! */ 392 /* success! */
393 hw->aq.asq.count = hw->aq.num_asq_entries;
394 goto init_adminq_exit; 394 goto init_adminq_exit;
395 395
396init_adminq_free_rings: 396init_adminq_free_rings:
@@ -432,7 +432,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
432 432
433 hw->aq.arq.next_to_use = 0; 433 hw->aq.arq.next_to_use = 0;
434 hw->aq.arq.next_to_clean = 0; 434 hw->aq.arq.next_to_clean = 0;
435 hw->aq.arq.count = hw->aq.num_arq_entries;
436 435
437 /* allocate the ring memory */ 436 /* allocate the ring memory */
438 ret_code = i40e_alloc_adminq_arq_ring(hw); 437 ret_code = i40e_alloc_adminq_arq_ring(hw);
@@ -450,6 +449,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
450 goto init_adminq_free_rings; 449 goto init_adminq_free_rings;
451 450
452 /* success! */ 451 /* success! */
452 hw->aq.arq.count = hw->aq.num_arq_entries;
453 goto init_adminq_exit; 453 goto init_adminq_exit;
454 454
455init_adminq_free_rings: 455init_adminq_free_rings:
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 8e81e53c370e..c34488479365 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1364,6 +1364,10 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
1364 * and performing a NOP command 1364 * and performing a NOP command
1365 */ 1365 */
1366 for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) { 1366 for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
1367 /* Make sure request_irq was called */
1368 if (!priv->eq_table.eq[i].have_irq)
1369 continue;
1370
1367 /* Temporary use polling for command completions */ 1371 /* Temporary use polling for command completions */
1368 mlx4_cmd_use_polling(dev); 1372 mlx4_cmd_use_polling(dev);
1369 1373
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 006757f80988..cc3a9897574c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2669,14 +2669,11 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2669 2669
2670 if (msi_x) { 2670 if (msi_x) {
2671 int nreq = dev->caps.num_ports * num_online_cpus() + 1; 2671 int nreq = dev->caps.num_ports * num_online_cpus() + 1;
2672 bool shared_ports = false;
2673 2672
2674 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2673 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
2675 nreq); 2674 nreq);
2676 if (nreq > MAX_MSIX) { 2675 if (nreq > MAX_MSIX)
2677 nreq = MAX_MSIX; 2676 nreq = MAX_MSIX;
2678 shared_ports = true;
2679 }
2680 2677
2681 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2678 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
2682 if (!entries) 2679 if (!entries)
@@ -2699,9 +2696,6 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2699 bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, 2696 bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
2700 dev->caps.num_ports); 2697 dev->caps.num_ports);
2701 2698
2702 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps))
2703 shared_ports = true;
2704
2705 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { 2699 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
2706 if (i == MLX4_EQ_ASYNC) 2700 if (i == MLX4_EQ_ASYNC)
2707 continue; 2701 continue;
@@ -2709,7 +2703,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2709 priv->eq_table.eq[i].irq = 2703 priv->eq_table.eq[i].irq =
2710 entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; 2704 entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
2711 2705
2712 if (shared_ports) { 2706 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
2713 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 2707 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
2714 dev->caps.num_ports); 2708 dev->caps.num_ports);
2715 /* We don't set affinity hint when there 2709 /* We don't set affinity hint when there
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
index e71563ce05d1..22d603f78273 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -598,6 +598,8 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
598 return; 598 return;
599 599
600 priv->vlan.filter_disabled = false; 600 priv->vlan.filter_disabled = false;
601 if (priv->netdev->flags & IFF_PROMISC)
602 return;
601 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); 603 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
602} 604}
603 605
@@ -607,6 +609,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
607 return; 609 return;
608 610
609 priv->vlan.filter_disabled = true; 611 priv->vlan.filter_disabled = true;
612 if (priv->netdev->flags & IFF_PROMISC)
613 return;
610 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); 614 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
611} 615}
612 616
@@ -717,8 +721,12 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
717 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; 721 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
718 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; 722 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
719 723
720 if (enable_promisc) 724 if (enable_promisc) {
721 mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); 725 mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
726 if (!priv->vlan.filter_disabled)
727 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
728 0);
729 }
722 if (enable_allmulti) 730 if (enable_allmulti)
723 mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); 731 mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
724 if (enable_broadcast) 732 if (enable_broadcast)
@@ -730,8 +738,12 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
730 mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); 738 mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
731 if (disable_allmulti) 739 if (disable_allmulti)
732 mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); 740 mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
733 if (disable_promisc) 741 if (disable_promisc) {
742 if (!priv->vlan.filter_disabled)
743 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
744 0);
734 mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); 745 mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
746 }
735 747
736 ea->promisc_enabled = promisc_enabled; 748 ea->promisc_enabled = promisc_enabled;
737 ea->allmulti_enabled = allmulti_enabled; 749 ea->allmulti_enabled = allmulti_enabled;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 821caaab9bfb..3b9480fa3403 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -311,7 +311,7 @@ static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
311 int err; 311 int err;
312 312
313 memset(in, 0, sizeof(in)); 313 memset(in, 0, sizeof(in));
314 MLX5_SET(ptys_reg, in, local_port, local_port); 314 MLX5_SET(pvlc_reg, in, local_port, local_port);
315 315
316 err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc, 316 err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
317 pvlc_size, MLX5_REG_PVLC, 0, 0); 317 pvlc_size, MLX5_REG_PVLC, 0, 0);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index dbcaf5df8967..28c19cc1a17c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -374,26 +374,31 @@ static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
374 int err; 374 int err;
375 int ret; 375 int ret;
376 376
377 mlxsw_core->emad.trans_active = true;
378
377 err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info); 379 err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
378 if (err) { 380 if (err) {
379 dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n", 381 dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
380 mlxsw_core->emad.tid); 382 mlxsw_core->emad.tid);
381 dev_kfree_skb(skb); 383 dev_kfree_skb(skb);
382 return err; 384 goto trans_inactive_out;
383 } 385 }
384 386
385 mlxsw_core->emad.trans_active = true;
386 ret = wait_event_timeout(mlxsw_core->emad.wait, 387 ret = wait_event_timeout(mlxsw_core->emad.wait,
387 !(mlxsw_core->emad.trans_active), 388 !(mlxsw_core->emad.trans_active),
388 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS)); 389 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
389 if (!ret) { 390 if (!ret) {
390 dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n", 391 dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
391 mlxsw_core->emad.tid); 392 mlxsw_core->emad.tid);
392 mlxsw_core->emad.trans_active = false; 393 err = -EIO;
393 return -EIO; 394 goto trans_inactive_out;
394 } 395 }
395 396
396 return 0; 397 return 0;
398
399trans_inactive_out:
400 mlxsw_core->emad.trans_active = false;
401 return err;
397} 402}
398 403
399static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core, 404static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index ffd55d030ce2..36fb1cec53c9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -187,6 +187,7 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
187{ 187{
188 u16 max_index, be_index; 188 u16 max_index, be_index;
189 u16 offset; /* byte offset inside the array */ 189 u16 offset; /* byte offset inside the array */
190 u8 in_byte_index;
190 191
191 BUG_ON(index && !item->element_size); 192 BUG_ON(index && !item->element_size);
192 if (item->offset % sizeof(u32) != 0 || 193 if (item->offset % sizeof(u32) != 0 ||
@@ -199,7 +200,8 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
199 max_index = (item->size.bytes << 3) / item->element_size - 1; 200 max_index = (item->size.bytes << 3) / item->element_size - 1;
200 be_index = max_index - index; 201 be_index = max_index - index;
201 offset = be_index * item->element_size >> 3; 202 offset = be_index * item->element_size >> 3;
202 *shift = index % (BITS_PER_BYTE / item->element_size) << 1; 203 in_byte_index = index % (BITS_PER_BYTE / item->element_size);
204 *shift = in_byte_index * item->element_size;
203 205
204 return item->offset + offset; 206 return item->offset + offset;
205} 207}
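The corrected shift above, in_byte_index * element_size, works for any sub-byte element width, whereas the old "<< 1" was only right for 2-bit elements. Below is a standalone sketch of the same arithmetic in plain C; the array size, element size and helper name are made up for illustration and are not taken from the mlxsw headers.

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

/* Offset/shift math for an array of sub-byte elements where element 0
 * lives in the last byte (big-endian element order), mirroring the
 * corrected computation in the hunk above. */
static uint16_t bit_array_offset(uint16_t size_bytes, uint8_t element_size,
                                 uint16_t index, uint8_t *shift)
{
    uint16_t max_index = (size_bytes * BITS_PER_BYTE) / element_size - 1;
    uint16_t be_index = max_index - index;
    uint16_t offset = be_index * element_size / BITS_PER_BYTE;
    uint8_t in_byte_index = index % (BITS_PER_BYTE / element_size);

    /* The fix: scale by the element size instead of a fixed "<< 1". */
    *shift = in_byte_index * element_size;
    return offset;
}

int main(void)
{
    uint8_t shift;

    /* 4-byte array of 2-bit elements: indices 0..15 */
    for (uint16_t i = 0; i < 16; i++) {
        uint16_t off = bit_array_offset(4, 2, i, &shift);
        printf("index %2u -> byte %u, shift %u\n",
               (unsigned)i, (unsigned)off, (unsigned)shift);
    }
    return 0;
}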
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 462cea31ecbb..cef866c37648 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1582,11 +1582,11 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
1582 1582
1583 if (in_mbox) 1583 if (in_mbox)
1584 memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size); 1584 memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
1585 mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32); 1585 mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
1586 mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr); 1586 mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));
1587 1587
1588 mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32); 1588 mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
1589 mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr); 1589 mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));
1590 1590
1591 mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod); 1591 mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
1592 mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0); 1592 mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
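upper_32_bits()/lower_32_bits() keep the high and low words of the mapped address well defined even when the address type is only 32 bits wide, where a plain ">> 32" is undefined. A minimal userspace equivalent, assuming a 64-bit value; the helper names and the address below are illustrative, not the kernel macros.

#include <stdio.h>
#include <stdint.h>

/* Widen to 64 bits first so the shift is always defined. The kernel's
 * upper_32_bits() uses two 16-bit shifts for the same reason. */
static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

int main(void)
{
    uint64_t mapaddr = 0x000000a1b2c3d4e5ULL;   /* made-up DMA address */

    printf("HI=0x%08x LO=0x%08x\n",
           (unsigned)upper_32(mapaddr), (unsigned)lower_32(mapaddr));
    return 0;
}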
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 3e52ee93438c..62cbbd1ada8d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1069,9 +1069,9 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1069 return 0; 1069 return 0;
1070 1070
1071err_register_netdev: 1071err_register_netdev:
1072err_port_admin_status_set:
1073err_port_mac_learning_mode_set: 1072err_port_mac_learning_mode_set:
1074err_port_stp_state_set: 1073err_port_stp_state_set:
1074err_port_admin_status_set:
1075err_port_mtu_set: 1075err_port_mtu_set:
1076err_port_speed_set: 1076err_port_speed_set:
1077err_port_swid_set: 1077err_port_swid_set:
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 66fd868152e5..b159ef8303cc 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -476,13 +476,12 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
476 mac[5] = tmp >> 8; 476 mac[5] = tmp >> 8;
477} 477}
478 478
479static void __lpc_eth_clock_enable(struct netdata_local *pldat, 479static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable)
480 bool enable)
481{ 480{
482 if (enable) 481 if (enable)
483 clk_enable(pldat->clk); 482 clk_prepare_enable(pldat->clk);
484 else 483 else
485 clk_disable(pldat->clk); 484 clk_disable_unprepare(pldat->clk);
486} 485}
487 486
488static void __lpc_params_setup(struct netdata_local *pldat) 487static void __lpc_params_setup(struct netdata_local *pldat)
@@ -1494,7 +1493,7 @@ err_out_free_irq:
1494err_out_iounmap: 1493err_out_iounmap:
1495 iounmap(pldat->net_base); 1494 iounmap(pldat->net_base);
1496err_out_disable_clocks: 1495err_out_disable_clocks:
1497 clk_disable(pldat->clk); 1496 clk_disable_unprepare(pldat->clk);
1498 clk_put(pldat->clk); 1497 clk_put(pldat->clk);
1499err_out_free_dev: 1498err_out_free_dev:
1500 free_netdev(ndev); 1499 free_netdev(ndev);
@@ -1519,7 +1518,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev)
1519 iounmap(pldat->net_base); 1518 iounmap(pldat->net_base);
1520 mdiobus_unregister(pldat->mii_bus); 1519 mdiobus_unregister(pldat->mii_bus);
1521 mdiobus_free(pldat->mii_bus); 1520 mdiobus_free(pldat->mii_bus);
1522 clk_disable(pldat->clk); 1521 clk_disable_unprepare(pldat->clk);
1523 clk_put(pldat->clk); 1522 clk_put(pldat->clk);
1524 free_netdev(ndev); 1523 free_netdev(ndev);
1525 1524
@@ -1540,7 +1539,7 @@ static int lpc_eth_drv_suspend(struct platform_device *pdev,
1540 if (netif_running(ndev)) { 1539 if (netif_running(ndev)) {
1541 netif_device_detach(ndev); 1540 netif_device_detach(ndev);
1542 __lpc_eth_shutdown(pldat); 1541 __lpc_eth_shutdown(pldat);
1543 clk_disable(pldat->clk); 1542 clk_disable_unprepare(pldat->clk);
1544 1543
1545 /* 1544 /*
1546 * Reset again now clock is disable to be sure 1545 * Reset again now clock is disable to be sure
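With the common clock framework a clock must be prepared before it can be enabled, which is why the driver now pairs clk_prepare_enable() with clk_disable_unprepare(). A minimal kernel-context sketch of that pairing, assuming a driver that already owns a struct clk; example_clock_up is a hypothetical helper and the snippet is only buildable inside a kernel tree.

#include <linux/clk.h>

/* clk_prepare_enable() may sleep, so call it from process context; the
 * teardown path mirrors it with clk_disable_unprepare(). */
static int example_clock_up(struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* ... use the peripheral ... */

	clk_disable_unprepare(clk);
	return 0;
}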
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index a83263743665..2b7550c43f78 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2134,10 +2134,11 @@ static int rhine_rx(struct net_device *dev, int limit)
2134 } 2134 }
2135 2135
2136 skb_put(skb, pkt_len); 2136 skb_put(skb, pkt_len);
2137 skb->protocol = eth_type_trans(skb, dev);
2138 2137
2139 rhine_rx_vlan_tag(skb, desc, data_size); 2138 rhine_rx_vlan_tag(skb, desc, data_size);
2140 2139
2140 skb->protocol = eth_type_trans(skb, dev);
2141
2141 netif_receive_skb(skb); 2142 netif_receive_skb(skb);
2142 2143
2143 u64_stats_update_begin(&rp->rx_stats.syncp); 2144 u64_stats_update_begin(&rp->rx_stats.syncp);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 8f5c02eed47d..cde29f8a37bf 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -870,14 +870,14 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
870 __be16 dst_port = htons(GENEVE_UDP_PORT); 870 __be16 dst_port = htons(GENEVE_UDP_PORT);
871 __u8 ttl = 0, tos = 0; 871 __u8 ttl = 0, tos = 0;
872 bool metadata = false; 872 bool metadata = false;
873 __be32 rem_addr; 873 __be32 rem_addr = 0;
874 __u32 vni; 874 __u32 vni = 0;
875 875
876 if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE]) 876 if (data[IFLA_GENEVE_ID])
877 return -EINVAL; 877 vni = nla_get_u32(data[IFLA_GENEVE_ID]);
878 878
879 vni = nla_get_u32(data[IFLA_GENEVE_ID]); 879 if (data[IFLA_GENEVE_REMOTE])
880 rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); 880 rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
881 881
882 if (data[IFLA_GENEVE_TTL]) 882 if (data[IFLA_GENEVE_TTL])
883 ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); 883 ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index c5ad98ace5d0..11e3975485c1 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -168,8 +168,6 @@ config MDIO_OCTEON
168 busses. It is required by the Octeon and ThunderX ethernet device 168 busses. It is required by the Octeon and ThunderX ethernet device
169 drivers. 169 drivers.
170 170
171 If in doubt, say Y.
172
173config MDIO_SUN4I 171config MDIO_SUN4I
174 tristate "Allwinner sun4i MDIO interface support" 172 tristate "Allwinner sun4i MDIO interface support"
175 depends on ARCH_SUNXI 173 depends on ARCH_SUNXI
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 3837ae344f63..2ed75060da50 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_device *dev)
313 if (po->pppoe_dev == dev && 313 if (po->pppoe_dev == dev &&
314 sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { 314 sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
315 pppox_unbind_sock(sk); 315 pppox_unbind_sock(sk);
316 sk->sk_state = PPPOX_ZOMBIE;
317 sk->sk_state_change(sk); 316 sk->sk_state_change(sk);
318 po->pppoe_dev = NULL; 317 po->pppoe_dev = NULL;
319 dev_put(dev); 318 dev_put(dev);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index fbb9325d1f6e..e66805eeffb4 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -164,6 +164,7 @@ config USB_NET_AX8817X
164 * Aten UC210T 164 * Aten UC210T
165 * ASIX AX88172 165 * ASIX AX88172
166 * Billionton Systems, USB2AR 166 * Billionton Systems, USB2AR
167 * Billionton Systems, GUSB2AM-1G-B
167 * Buffalo LUA-U2-KTX 168 * Buffalo LUA-U2-KTX
168 * Corega FEther USB2-TX 169 * Corega FEther USB2-TX
169 * D-Link DUB-E100 170 * D-Link DUB-E100
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 75d6f26729a3..079069a060a6 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -91,8 +91,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
91 } 91 }
92 rx->ax_skb = netdev_alloc_skb_ip_align(dev->net, 92 rx->ax_skb = netdev_alloc_skb_ip_align(dev->net,
93 rx->size); 93 rx->size);
94 if (!rx->ax_skb) 94 if (!rx->ax_skb) {
95 rx->size = 0;
95 return 0; 96 return 0;
97 }
96 } 98 }
97 99
98 if (rx->size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { 100 if (rx->size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 1173a24feda3..5cabefc23494 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -959,6 +959,10 @@ static const struct usb_device_id products [] = {
959 USB_DEVICE (0x08dd, 0x90ff), 959 USB_DEVICE (0x08dd, 0x90ff),
960 .driver_info = (unsigned long) &ax8817x_info, 960 .driver_info = (unsigned long) &ax8817x_info,
961}, { 961}, {
962 // Billionton Systems, GUSB2AM-1G-B
963 USB_DEVICE(0x08dd, 0x0114),
964 .driver_info = (unsigned long) &ax88178_info,
965}, {
962 // ATEN UC210T 966 // ATEN UC210T
963 USB_DEVICE (0x0557, 0x2009), 967 USB_DEVICE (0x0557, 0x2009),
964 .driver_info = (unsigned long) &ax8817x_info, 968 .driver_info = (unsigned long) &ax8817x_info,
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bbac1d35ed4e..afdc65fd5bc5 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2745,11 +2745,10 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2745 struct vxlan_config conf; 2745 struct vxlan_config conf;
2746 int err; 2746 int err;
2747 2747
2748 if (!data[IFLA_VXLAN_ID])
2749 return -EINVAL;
2750
2751 memset(&conf, 0, sizeof(conf)); 2748 memset(&conf, 0, sizeof(conf));
2752 conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]); 2749
2750 if (data[IFLA_VXLAN_ID])
2751 conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
2753 2752
2754 if (data[IFLA_VXLAN_GROUP]) { 2753 if (data[IFLA_VXLAN_GROUP]) {
2755 conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); 2754 conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 23afcda2de96..678d72af4a9d 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -337,7 +337,7 @@ enum ath10k_hw_rate_cck {
337#define TARGET_10X_MAX_FRAG_ENTRIES 0 337#define TARGET_10X_MAX_FRAG_ENTRIES 0
338 338
339/* 10.2 parameters */ 339/* 10.2 parameters */
340#define TARGET_10_2_DMA_BURST_SIZE 1 340#define TARGET_10_2_DMA_BURST_SIZE 0
341 341
342/* Target specific defines for WMI-TLV firmware */ 342/* Target specific defines for WMI-TLV firmware */
343#define TARGET_TLV_NUM_VDEVS 4 343#define TARGET_TLV_NUM_VDEVS 4
@@ -391,7 +391,7 @@ enum ath10k_hw_rate_cck {
391 391
392#define TARGET_10_4_TX_DBG_LOG_SIZE 1024 392#define TARGET_10_4_TX_DBG_LOG_SIZE 1024
393#define TARGET_10_4_NUM_WDS_ENTRIES 32 393#define TARGET_10_4_NUM_WDS_ENTRIES 32
394#define TARGET_10_4_DMA_BURST_SIZE 1 394#define TARGET_10_4_DMA_BURST_SIZE 0
395#define TARGET_10_4_MAC_AGGR_DELIM 0 395#define TARGET_10_4_MAC_AGGR_DELIM 0
396#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 396#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
397#define TARGET_10_4_VOW_CONFIG 0 397#define TARGET_10_4_VOW_CONFIG 0
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 57f95f2dca5b..90eb75012e4f 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -880,6 +880,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
880 hw->max_rate_tries = 10; 880 hw->max_rate_tries = 10;
881 hw->sta_data_size = sizeof(struct ath_node); 881 hw->sta_data_size = sizeof(struct ath_node);
882 hw->vif_data_size = sizeof(struct ath_vif); 882 hw->vif_data_size = sizeof(struct ath_vif);
883 hw->extra_tx_headroom = 4;
883 884
884 hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1; 885 hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
885 hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1; 886 hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 28490702124a..71d3e9adbf3c 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -120,6 +120,7 @@ MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if over
120#ifdef CONFIG_B43_BCMA 120#ifdef CONFIG_B43_BCMA
121static const struct bcma_device_id b43_bcma_tbl[] = { 121static const struct bcma_device_id b43_bcma_tbl[] = {
122 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS), 122 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS),
123 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x15, BCMA_ANY_CLASS),
123 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS), 124 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
124 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS), 125 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
125 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1C, BCMA_ANY_CLASS), 126 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1C, BCMA_ANY_CLASS),
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index ab45819c1fbb..e18629a16fb0 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -1020,7 +1020,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
1020 u8 *pn = seq.ccmp.pn; 1020 u8 *pn = seq.ccmp.pn;
1021 1021
1022 ieee80211_get_key_rx_seq(key, i, &seq); 1022 ieee80211_get_key_rx_seq(key, i, &seq);
1023 aes_sc->pn = cpu_to_le64( 1023 aes_sc[i].pn = cpu_to_le64(
1024 (u64)pn[5] | 1024 (u64)pn[5] |
1025 ((u64)pn[4] << 8) | 1025 ((u64)pn[4] << 8) |
1026 ((u64)pn[3] << 16) | 1026 ((u64)pn[3] << 16) |
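The indexing fix writes each per-TID receive counter into its own aes_sc[i] slot; the value itself is a 48-bit CCMP packet number assembled from six bytes. A standalone sketch of that packing follows, with example bytes chosen only for illustration.

#include <stdio.h>
#include <stdint.h>

/* Assemble a 48-bit CCMP packet number from its six bytes, matching the
 * shift pattern in the hunk above: pn[5] is the least significant byte,
 * pn[0] the most significant. */
static uint64_t ccmp_pn_to_u64(const uint8_t pn[6])
{
    return  (uint64_t)pn[5]        |
           ((uint64_t)pn[4] <<  8) |
           ((uint64_t)pn[3] << 16) |
           ((uint64_t)pn[2] << 24) |
           ((uint64_t)pn[1] << 32) |
           ((uint64_t)pn[0] << 40);
}

int main(void)
{
    const uint8_t pn[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };

    printf("pn = 0x%012llx\n", (unsigned long long)ccmp_pn_to_u64(pn));
    return 0;
}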
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 6951aba620eb..3fb327d5a911 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -348,6 +348,6 @@ const struct iwl_cfg iwl7265d_n_cfg = {
348}; 348};
349 349
350MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 350MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
351MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); 351MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
352MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 352MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
353MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 353MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 04264e417c1c..576187611e61 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -274,18 +274,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
274 break; 274 break;
275 case WLAN_CIPHER_SUITE_CCMP: 275 case WLAN_CIPHER_SUITE_CCMP:
276 if (sta) { 276 if (sta) {
277 u8 *pn = seq.ccmp.pn; 277 u64 pn64;
278 278
279 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; 279 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
280 aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; 280 aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
281 281
282 ieee80211_get_key_tx_seq(key, &seq); 282 pn64 = atomic64_read(&key->tx_pn);
283 aes_tx_sc->pn = cpu_to_le64((u64)pn[5] | 283 aes_tx_sc->pn = cpu_to_le64(pn64);
284 ((u64)pn[4] << 8) |
285 ((u64)pn[3] << 16) |
286 ((u64)pn[2] << 24) |
287 ((u64)pn[1] << 32) |
288 ((u64)pn[0] << 40));
289 } else { 284 } else {
290 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; 285 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
291 } 286 }
@@ -298,12 +293,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
298 u8 *pn = seq.ccmp.pn; 293 u8 *pn = seq.ccmp.pn;
299 294
300 ieee80211_get_key_rx_seq(key, i, &seq); 295 ieee80211_get_key_rx_seq(key, i, &seq);
301 aes_sc->pn = cpu_to_le64((u64)pn[5] | 296 aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
302 ((u64)pn[4] << 8) | 297 ((u64)pn[4] << 8) |
303 ((u64)pn[3] << 16) | 298 ((u64)pn[3] << 16) |
304 ((u64)pn[2] << 24) | 299 ((u64)pn[2] << 24) |
305 ((u64)pn[1] << 32) | 300 ((u64)pn[1] << 32) |
306 ((u64)pn[0] << 40)); 301 ((u64)pn[0] << 40));
307 } 302 }
308 data->use_rsc_tsc = true; 303 data->use_rsc_tsc = true;
309 break; 304 break;
@@ -1453,15 +1448,15 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
1453 1448
1454 switch (key->cipher) { 1449 switch (key->cipher) {
1455 case WLAN_CIPHER_SUITE_CCMP: 1450 case WLAN_CIPHER_SUITE_CCMP:
1456 iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq);
1457 iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key); 1451 iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
1452 atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
1458 break; 1453 break;
1459 case WLAN_CIPHER_SUITE_TKIP: 1454 case WLAN_CIPHER_SUITE_TKIP:
1460 iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq); 1455 iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
1461 iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key); 1456 iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
1457 ieee80211_set_key_tx_seq(key, &seq);
1462 break; 1458 break;
1463 } 1459 }
1464 ieee80211_set_key_tx_seq(key, &seq);
1465 1460
1466 /* that's it for this key */ 1461 /* that's it for this key */
1467 return; 1462 return;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 4a0ce83315bd..5c7f7cc9ffcc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -703,7 +703,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
703 * abort after reading the nvm in case RF Kill is on, we will complete 703 * abort after reading the nvm in case RF Kill is on, we will complete
704 * the init seq later when RF kill will switch to off 704 * the init seq later when RF kill will switch to off
705 */ 705 */
706 if (iwl_mvm_is_radio_killed(mvm)) { 706 if (iwl_mvm_is_radio_hw_killed(mvm)) {
707 IWL_DEBUG_RF_KILL(mvm, 707 IWL_DEBUG_RF_KILL(mvm,
708 "jump over all phy activities due to RF kill\n"); 708 "jump over all phy activities due to RF kill\n");
709 iwl_remove_notification(&mvm->notif_wait, &calib_wait); 709 iwl_remove_notification(&mvm->notif_wait, &calib_wait);
@@ -736,7 +736,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
736 ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, 736 ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
737 MVM_UCODE_CALIB_TIMEOUT); 737 MVM_UCODE_CALIB_TIMEOUT);
738 738
739 if (ret && iwl_mvm_is_radio_killed(mvm)) { 739 if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
740 IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); 740 IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
741 ret = 1; 741 ret = 1;
742 } 742 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index aa8c2b7f23c7..7c2944a72470 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -2388,6 +2388,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2388 iwl_mvm_remove_time_event(mvm, mvmvif, 2388 iwl_mvm_remove_time_event(mvm, mvmvif,
2389 &mvmvif->time_event_data); 2389 &mvmvif->time_event_data);
2390 RCU_INIT_POINTER(mvm->csa_vif, NULL); 2390 RCU_INIT_POINTER(mvm->csa_vif, NULL);
2391 mvmvif->csa_countdown = false;
2391 } 2392 }
2392 2393
2393 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { 2394 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index b95a07ec9e36..c754051a4cea 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -860,6 +860,11 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
860 test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); 860 test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
861} 861}
862 862
863static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm)
864{
865 return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
866}
867
863/* Must be called with rcu_read_lock() held and it can only be 868/* Must be called with rcu_read_lock() held and it can only be
864 * released when mvmsta is not needed anymore. 869 * released when mvmsta is not needed anymore.
865 */ 870 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index a37de3f410a0..f0cb092f980e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -590,6 +590,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
590 ieee80211_unregister_hw(mvm->hw); 590 ieee80211_unregister_hw(mvm->hw);
591 iwl_mvm_leds_exit(mvm); 591 iwl_mvm_leds_exit(mvm);
592 out_free: 592 out_free:
593 flush_delayed_work(&mvm->fw_dump_wk);
593 iwl_phy_db_free(mvm->phy_db); 594 iwl_phy_db_free(mvm->phy_db);
594 kfree(mvm->scan_cmd); 595 kfree(mvm->scan_cmd);
595 if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name) 596 if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index b0825c402c73..644b58bc5226 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -414,6 +414,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
414 {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, 414 {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
415 {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, 415 {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
416 {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, 416 {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
417 {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)},
418 {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)},
419 {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
420 {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
421 {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
417 422
418/* 8000 Series */ 423/* 8000 Series */
419 {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, 424 {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 5932306084fd..bf9afbf46c1b 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1114,6 +1114,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1114 { USB_DEVICE(0x0db0, 0x871c) }, 1114 { USB_DEVICE(0x0db0, 0x871c) },
1115 { USB_DEVICE(0x0db0, 0x899a) }, 1115 { USB_DEVICE(0x0db0, 0x899a) },
1116 /* Ovislink */ 1116 /* Ovislink */
1117 { USB_DEVICE(0x1b75, 0x3070) },
1117 { USB_DEVICE(0x1b75, 0x3071) }, 1118 { USB_DEVICE(0x1b75, 0x3071) },
1118 { USB_DEVICE(0x1b75, 0x3072) }, 1119 { USB_DEVICE(0x1b75, 0x3072) },
1119 { USB_DEVICE(0x1b75, 0xa200) }, 1120 { USB_DEVICE(0x1b75, 0xa200) },
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index d4567d12e07e..5da6703942d9 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -247,6 +247,8 @@ struct rtl_pci {
247 /* MSI support */ 247 /* MSI support */
248 bool msi_support; 248 bool msi_support;
249 bool using_msi; 249 bool using_msi;
250 /* interrupt clear before set */
251 bool int_clear;
250}; 252};
251 253
252struct mp_adapter { 254struct mp_adapter {
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
index b7f18e2155eb..6e9418ed90c2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -2253,11 +2253,28 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
2253 } 2253 }
2254} 2254}
2255 2255
2256static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw)
2257{
2258 struct rtl_priv *rtlpriv = rtl_priv(hw);
2259 u32 tmp = rtl_read_dword(rtlpriv, REG_HISR);
2260
2261 rtl_write_dword(rtlpriv, REG_HISR, tmp);
2262
2263 tmp = rtl_read_dword(rtlpriv, REG_HISRE);
2264 rtl_write_dword(rtlpriv, REG_HISRE, tmp);
2265
2266 tmp = rtl_read_dword(rtlpriv, REG_HSISR);
2267 rtl_write_dword(rtlpriv, REG_HSISR, tmp);
2268}
2269
2256void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw) 2270void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
2257{ 2271{
2258 struct rtl_priv *rtlpriv = rtl_priv(hw); 2272 struct rtl_priv *rtlpriv = rtl_priv(hw);
2259 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 2273 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2260 2274
2275 if (!rtlpci->int_clear)
2276 rtl8821ae_clear_interrupt(hw);/*clear it here first*/
2277
2261 rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); 2278 rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
2262 rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); 2279 rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
2263 rtlpci->irq_enabled = true; 2280 rtlpci->irq_enabled = true;
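rtl8821ae_clear_interrupt() relies on the usual write-1-to-clear convention: the pending status is read back and written to the same register to acknowledge it. A standalone simulation of that convention on a plain variable; the register value is made up and real hardware would use an MMIO access.

#include <stdio.h>
#include <stdint.h>

static uint32_t hisr = 0x00000305;              /* pending bits (made up) */

static uint32_t read_reg(void)        { return hisr; }
static void     write_reg(uint32_t v) { hisr &= ~v; }   /* W1C semantics */

int main(void)
{
    uint32_t pending = read_reg();

    write_reg(pending);                         /* ack everything pending */
    printf("before=0x%08x after=0x%08x\n", pending, read_reg());
    return 0;
}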
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
index a4988121e1ab..8ee141a55bc5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
@@ -96,6 +96,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
96 96
97 rtl8821ae_bt_reg_init(hw); 97 rtl8821ae_bt_reg_init(hw);
98 rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; 98 rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
99 rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
99 rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); 100 rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
100 101
101 rtlpriv->dm.dm_initialgain_enable = 1; 102 rtlpriv->dm.dm_initialgain_enable = 1;
@@ -167,6 +168,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
167 rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; 168 rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
168 rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; 169 rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
169 rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; 170 rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
 171	rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;

170 if (rtlpriv->cfg->mod_params->disable_watchdog) 172 if (rtlpriv->cfg->mod_params->disable_watchdog)
171 pr_info("watchdog disabled\n"); 173 pr_info("watchdog disabled\n");
172 rtlpriv->psc.reg_fwctrl_lps = 3; 174 rtlpriv->psc.reg_fwctrl_lps = 3;
@@ -308,6 +310,7 @@ static struct rtl_mod_params rtl8821ae_mod_params = {
308 .swctrl_lps = false, 310 .swctrl_lps = false,
309 .fwctrl_lps = true, 311 .fwctrl_lps = true,
310 .msi_support = true, 312 .msi_support = true,
313 .int_clear = true,
311 .debug = DBG_EMERG, 314 .debug = DBG_EMERG,
312 .disable_watchdog = 0, 315 .disable_watchdog = 0,
313}; 316};
@@ -437,6 +440,7 @@ module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444);
437module_param_named(msi, rtl8821ae_mod_params.msi_support, bool, 0444); 440module_param_named(msi, rtl8821ae_mod_params.msi_support, bool, 0444);
438module_param_named(disable_watchdog, rtl8821ae_mod_params.disable_watchdog, 441module_param_named(disable_watchdog, rtl8821ae_mod_params.disable_watchdog,
439 bool, 0444); 442 bool, 0444);
443module_param_named(int_clear, rtl8821ae_mod_params.int_clear, bool, 0444);
440MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); 444MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
441MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); 445MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
442MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); 446MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
@@ -444,6 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
444MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); 448MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
445MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); 449MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
446MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); 450MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
 451MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 1)\n");
447 452
448static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); 453static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
449 454
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index b90ca618b123..4544752a2ba8 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -2249,6 +2249,9 @@ struct rtl_mod_params {
2249 2249
2250 /* default 0: 1 means disable */ 2250 /* default 0: 1 means disable */
2251 bool disable_watchdog; 2251 bool disable_watchdog;
2252
 2253	/* default 1: 1 means do not clear interrupt status before enabling */
2254 bool int_clear;
2252}; 2255};
2253 2256
2254struct rtl_hal_usbint_cfg { 2257struct rtl_hal_usbint_cfg {
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 929a6e7e5ecf..56ebd8267386 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -788,6 +788,12 @@ static void connect(struct backend_info *be)
788 /* Use the number of queues requested by the frontend */ 788 /* Use the number of queues requested by the frontend */
789 be->vif->queues = vzalloc(requested_num_queues * 789 be->vif->queues = vzalloc(requested_num_queues *
790 sizeof(struct xenvif_queue)); 790 sizeof(struct xenvif_queue));
791 if (!be->vif->queues) {
792 xenbus_dev_fatal(dev, -ENOMEM,
793 "allocating queues");
794 return;
795 }
796
791 be->vif->num_queues = requested_num_queues; 797 be->vif->num_queues = requested_num_queues;
792 be->vif->stalled_queues = requested_num_queues; 798 be->vif->stalled_queues = requested_num_queues;
793 799
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 2365a32a595e..be3755c973e9 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -823,9 +823,15 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
823 } 823 }
824 824
825 /* Now look up the logical CPU number */ 825 /* Now look up the logical CPU number */
826 for_each_possible_cpu(cpu) 826 for_each_possible_cpu(cpu) {
827 if (dn == of_cpu_device_node_get(cpu)) 827 struct device_node *cpu_dn;
828
829 cpu_dn = of_cpu_device_node_get(cpu);
830 of_node_put(cpu_dn);
831
832 if (dn == cpu_dn)
828 break; 833 break;
834 }
829 835
830 if (cpu >= nr_cpu_ids) { 836 if (cpu >= nr_cpu_ids) {
831 pr_warn("Failed to find logical CPU for %s\n", 837 pr_warn("Failed to find logical CPU for %s\n",
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index 23685e74917e..bd2c69f85949 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -116,7 +116,7 @@ static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
116 if (ret) 116 if (ret)
117 goto error_ret; 117 goto error_ret;
118 118
119 for (i = 0; i < num_read; i++) 119 for (i = 0; i < num_read / sizeof(u16); i++)
120 *(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i); 120 *(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i);
121 121
122 if (copy_to_user(buf, rx, num_read)) 122 if (copy_to_user(buf, rx, num_read))
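Dividing num_read (a byte count) by sizeof(u16) makes the loop touch each 16-bit sample exactly once instead of walking past the end of the buffer. A standalone sketch of the corrected big-endian conversion, with an example buffer; the data and helper name are illustrative only.

#include <stdio.h>
#include <stdint.h>

static uint16_t be16_to_host(const uint8_t *p)
{
    return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
    /* Pretend "num_read" bytes came back from the device, big-endian. */
    const uint8_t rx[] = { 0x12, 0x34, 0xab, 0xcd, 0x00, 0x7f };
    size_t num_read = sizeof(rx);
    uint16_t samples[sizeof(rx) / sizeof(uint16_t)];

    /* Iterate over 16-bit samples, not over bytes (the bug being fixed). */
    for (size_t i = 0; i < num_read / sizeof(uint16_t); i++)
        samples[i] = be16_to_host(rx + 2 * i);

    for (size_t i = 0; i < num_read / sizeof(uint16_t); i++)
        printf("sample[%zu] = 0x%04x\n", i, (unsigned)samples[i]);
    return 0;
}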
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index 3f7715c9968b..47fc00a3f63b 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -915,11 +915,12 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
915 case IIO_CHAN_INFO_OFFSET: 915 case IIO_CHAN_INFO_OFFSET:
916 if (chan->type == IIO_TEMP) { 916 if (chan->type == IIO_TEMP) {
917 /* The calculated value from the ADC is in Kelvin, we 917 /* The calculated value from the ADC is in Kelvin, we
918 * want Celsius for hwmon so the offset is 918 * want Celsius for hwmon so the offset is -273.15
919 * -272.15 * scale 919 * The offset is applied before scaling so it is
920 * actually -213.15 * 4 / 1.012 = -1079.644268
920 */ 921 */
921 *val = -1075; 922 *val = -1079;
922 *val2 = 691699; 923 *val2 = 644268;
923 924
924 return IIO_VAL_INT_PLUS_MICRO; 925 return IIO_VAL_INT_PLUS_MICRO;
925 } 926 }
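The new constants follow from applying the Kelvin-to-Celsius offset before the channel scale of 4/1.012: -273.15 * 4 / 1.012 is about -1079.644268, split into an integer part and a microunit part for IIO_VAL_INT_PLUS_MICRO. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
    /* Kelvin->Celsius offset, applied before the 4/1.012 channel scale. */
    double offset = -273.15 * 4.0 / 1.012;
    int val = (int)offset;                           /* truncates: -1079 */
    int val2 = (int)(((double)val - offset) * 1e6);  /* micro part: 644268 */

    printf("offset = %.6f -> val = %d, val2 = %d\n", offset, val, val2);
    return 0;
}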
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 0bae8cc6c23a..ca920b0ecf8f 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -932,7 +932,7 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
932 932
933 if (data->soc == SOC_ARCH_EXYNOS5260) 933 if (data->soc == SOC_ARCH_EXYNOS5260)
934 emul_con = EXYNOS5260_EMUL_CON; 934 emul_con = EXYNOS5260_EMUL_CON;
935 if (data->soc == SOC_ARCH_EXYNOS5433) 935 else if (data->soc == SOC_ARCH_EXYNOS5433)
936 emul_con = EXYNOS5433_TMU_EMUL_CON; 936 emul_con = EXYNOS5433_TMU_EMUL_CON;
937 else if (data->soc == SOC_ARCH_EXYNOS7) 937 else if (data->soc == SOC_ARCH_EXYNOS7)
938 emul_con = EXYNOS7_TMU_REG_EMUL_CON; 938 emul_con = EXYNOS7_TMU_REG_EMUL_CON;
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 21d01a491405..e508939daea3 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -80,10 +80,6 @@ int serial8250_tx_dma(struct uart_8250_port *p)
80 return 0; 80 return 0;
81 81
82 dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); 82 dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
83 if (dma->tx_size < p->port.fifosize) {
84 ret = -EINVAL;
85 goto err;
86 }
87 83
88 desc = dmaengine_prep_slave_single(dma->txchan, 84 desc = dmaengine_prep_slave_single(dma->txchan,
89 dma->tx_addr + xmit->tail, 85 dma->tx_addr + xmit->tail,
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c79d33676672..c47d3e480586 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -147,6 +147,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
147 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 147 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
148 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { 148 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
149 xhci->quirks |= XHCI_SPURIOUS_REBOOT; 149 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
150 xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
150 } 151 }
151 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 152 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
152 (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || 153 (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 43291f93afeb..97ffe3997273 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2191,6 +2191,10 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2191 } 2191 }
2192 /* Fast path - was this the last TRB in the TD for this URB? */ 2192 /* Fast path - was this the last TRB in the TD for this URB? */
2193 } else if (event_trb == td->last_trb) { 2193 } else if (event_trb == td->last_trb) {
2194 if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
2195 return finish_td(xhci, td, event_trb, event, ep,
2196 status, false);
2197
2194 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 2198 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2195 td->urb->actual_length = 2199 td->urb->actual_length =
2196 td->urb->transfer_buffer_length - 2200 td->urb->transfer_buffer_length -
@@ -2242,6 +2246,12 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2242 td->urb->actual_length += 2246 td->urb->actual_length +=
2243 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 2247 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2244 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2248 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2249
2250 if (trb_comp_code == COMP_SHORT_TX) {
2251 xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
2252 td->urb_length_set = true;
2253 return 0;
2254 }
2245 } 2255 }
2246 2256
2247 return finish_td(xhci, td, event_trb, event, ep, status, false); 2257 return finish_td(xhci, td, event_trb, event, ep, status, false);
@@ -2274,6 +2284,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2274 u32 trb_comp_code; 2284 u32 trb_comp_code;
2275 int ret = 0; 2285 int ret = 0;
2276 int td_num = 0; 2286 int td_num = 0;
2287 bool handling_skipped_tds = false;
2277 2288
2278 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 2289 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2279 xdev = xhci->devs[slot_id]; 2290 xdev = xhci->devs[slot_id];
@@ -2410,6 +2421,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2410 ep->skip = true; 2421 ep->skip = true;
2411 xhci_dbg(xhci, "Miss service interval error, set skip flag\n"); 2422 xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2412 goto cleanup; 2423 goto cleanup;
2424 case COMP_PING_ERR:
2425 ep->skip = true;
2426 xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
2427 goto cleanup;
2413 default: 2428 default:
2414 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { 2429 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2415 status = 0; 2430 status = 0;
@@ -2546,13 +2561,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2546 ep, &status); 2561 ep, &status);
2547 2562
2548cleanup: 2563cleanup:
2564
2565
2566 handling_skipped_tds = ep->skip &&
2567 trb_comp_code != COMP_MISSED_INT &&
2568 trb_comp_code != COMP_PING_ERR;
2569
2549 /* 2570 /*
2550 * Do not update event ring dequeue pointer if ep->skip is set. 2571 * Do not update event ring dequeue pointer if we're in a loop
2551 * Will roll back to continue process missed tds. 2572 * processing missed tds.
2552 */ 2573 */
2553 if (trb_comp_code == COMP_MISSED_INT || !ep->skip) { 2574 if (!handling_skipped_tds)
2554 inc_deq(xhci, xhci->event_ring); 2575 inc_deq(xhci, xhci->event_ring);
2555 }
2556 2576
2557 if (ret) { 2577 if (ret) {
2558 urb = td->urb; 2578 urb = td->urb;
@@ -2587,7 +2607,7 @@ cleanup:
2587 * Process them as short transfer until reach the td pointed by 2607 * Process them as short transfer until reach the td pointed by
2588 * the event. 2608 * the event.
2589 */ 2609 */
2590 } while (ep->skip && trb_comp_code != COMP_MISSED_INT); 2610 } while (handling_skipped_tds);
2591 2611
2592 return 0; 2612 return 0;
2593} 2613}
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 1aaf89300621..92f394927f24 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1093,6 +1093,7 @@ static void fbcon_init(struct vc_data *vc, int init)
1093 con_copy_unimap(vc, svc); 1093 con_copy_unimap(vc, svc);
1094 1094
1095 ops = info->fbcon_par; 1095 ops = info->fbcon_par;
1096 ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
1096 p->con_rotate = initial_rotation; 1097 p->con_rotate = initial_rotation;
1097 set_blitting_type(vc, info); 1098 set_blitting_type(vc, info);
1098 1099
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index b823fac91c92..8c6f247ba81d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2584,7 +2584,7 @@ static long btrfs_fallocate(struct file *file, int mode,
2584 alloc_start); 2584 alloc_start);
2585 if (ret) 2585 if (ret)
2586 goto out; 2586 goto out;
2587 } else { 2587 } else if (offset + len > inode->i_size) {
2588 /* 2588 /*
2589 * If we are fallocating from the end of the file onward we 2589 * If we are fallocating from the end of the file onward we
2590 * need to zero out the end of the page if i_size lands in the 2590 * need to zero out the end of the page if i_size lands in the
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 3e3e6130637f..8d20f3b1cab0 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4641,7 +4641,7 @@ locked:
4641 4641
4642 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) { 4642 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
4643 ret = -EINVAL; 4643 ret = -EINVAL;
4644 goto out_bargs; 4644 goto out_bctl;
4645 } 4645 }
4646 4646
4647do_balance: 4647do_balance:
@@ -4655,12 +4655,15 @@ do_balance:
4655 need_unlock = false; 4655 need_unlock = false;
4656 4656
4657 ret = btrfs_balance(bctl, bargs); 4657 ret = btrfs_balance(bctl, bargs);
4658 bctl = NULL;
4658 4659
4659 if (arg) { 4660 if (arg) {
4660 if (copy_to_user(arg, bargs, sizeof(*bargs))) 4661 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4661 ret = -EFAULT; 4662 ret = -EFAULT;
4662 } 4663 }
4663 4664
4665out_bctl:
4666 kfree(bctl);
4664out_bargs: 4667out_bargs:
4665 kfree(bargs); 4668 kfree(bargs);
4666out_unlock: 4669out_unlock:
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 091a36444972..29e4599f6fc1 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -778,19 +778,24 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
778 struct wb_writeback_work *base_work, 778 struct wb_writeback_work *base_work,
779 bool skip_if_busy) 779 bool skip_if_busy)
780{ 780{
781 int next_memcg_id = 0; 781 struct bdi_writeback *last_wb = NULL;
782 struct bdi_writeback *wb; 782 struct bdi_writeback *wb = list_entry_rcu(&bdi->wb_list,
783 struct wb_iter iter; 783 struct bdi_writeback, bdi_node);
784 784
785 might_sleep(); 785 might_sleep();
786restart: 786restart:
787 rcu_read_lock(); 787 rcu_read_lock();
788 bdi_for_each_wb(wb, bdi, &iter, next_memcg_id) { 788 list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
789 DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done); 789 DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done);
790 struct wb_writeback_work fallback_work; 790 struct wb_writeback_work fallback_work;
791 struct wb_writeback_work *work; 791 struct wb_writeback_work *work;
792 long nr_pages; 792 long nr_pages;
793 793
794 if (last_wb) {
795 wb_put(last_wb);
796 last_wb = NULL;
797 }
798
794 /* SYNC_ALL writes out I_DIRTY_TIME too */ 799 /* SYNC_ALL writes out I_DIRTY_TIME too */
795 if (!wb_has_dirty_io(wb) && 800 if (!wb_has_dirty_io(wb) &&
796 (base_work->sync_mode == WB_SYNC_NONE || 801 (base_work->sync_mode == WB_SYNC_NONE ||
@@ -819,12 +824,22 @@ restart:
819 824
820 wb_queue_work(wb, work); 825 wb_queue_work(wb, work);
821 826
822 next_memcg_id = wb->memcg_css->id + 1; 827 /*
828 * Pin @wb so that it stays on @bdi->wb_list. This allows
829 * continuing iteration from @wb after dropping and
830 * regrabbing rcu read lock.
831 */
832 wb_get(wb);
833 last_wb = wb;
834
823 rcu_read_unlock(); 835 rcu_read_unlock();
824 wb_wait_for_completion(bdi, &fallback_work_done); 836 wb_wait_for_completion(bdi, &fallback_work_done);
825 goto restart; 837 goto restart;
826 } 838 }
827 rcu_read_unlock(); 839 rcu_read_unlock();
840
841 if (last_wb)
842 wb_put(last_wb);
828} 843}
829 844
830#else /* CONFIG_CGROUP_WRITEBACK */ 845#else /* CONFIG_CGROUP_WRITEBACK */
@@ -1857,12 +1872,11 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
1857 rcu_read_lock(); 1872 rcu_read_lock();
1858 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { 1873 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
1859 struct bdi_writeback *wb; 1874 struct bdi_writeback *wb;
1860 struct wb_iter iter;
1861 1875
1862 if (!bdi_has_dirty_io(bdi)) 1876 if (!bdi_has_dirty_io(bdi))
1863 continue; 1877 continue;
1864 1878
1865 bdi_for_each_wb(wb, bdi, &iter, 0) 1879 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
1866 wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages), 1880 wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages),
1867 false, reason); 1881 false, reason);
1868 } 1882 }
@@ -1894,11 +1908,10 @@ static void wakeup_dirtytime_writeback(struct work_struct *w)
1894 rcu_read_lock(); 1908 rcu_read_lock();
1895 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { 1909 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
1896 struct bdi_writeback *wb; 1910 struct bdi_writeback *wb;
1897 struct wb_iter iter;
1898 1911
1899 bdi_for_each_wb(wb, bdi, &iter, 0) 1912 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
1900 if (!list_empty(&bdi->wb.b_dirty_time)) 1913 if (!list_empty(&wb->b_dirty_time))
1901 wb_wakeup(&bdi->wb); 1914 wb_wakeup(wb);
1902 } 1915 }
1903 rcu_read_unlock(); 1916 rcu_read_unlock();
1904 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); 1917 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index ee5aa4daaea0..ce38b4ccc9ab 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1658,12 +1658,13 @@ send_response:
1658 if (ret < 0) { 1658 if (ret < 0) {
1659 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1659 mlog(ML_ERROR, "failed to dispatch assert master work\n");
1660 response = DLM_MASTER_RESP_ERROR; 1660 response = DLM_MASTER_RESP_ERROR;
1661 spin_unlock(&res->spinlock);
1661 dlm_lockres_put(res); 1662 dlm_lockres_put(res);
1662 } else { 1663 } else {
1663 dispatched = 1; 1664 dispatched = 1;
1664 __dlm_lockres_grab_inflight_worker(dlm, res); 1665 __dlm_lockres_grab_inflight_worker(dlm, res);
1666 spin_unlock(&res->spinlock);
1665 } 1667 }
1666 spin_unlock(&res->spinlock);
1667 } else { 1668 } else {
1668 if (res) 1669 if (res)
1669 dlm_lockres_put(res); 1670 dlm_lockres_put(res);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 3d90ad7ff91f..58eaa5c0d387 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1723,8 +1723,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1723 } else { 1723 } else {
1724 dispatched = 1; 1724 dispatched = 1;
1725 __dlm_lockres_grab_inflight_worker(dlm, res); 1725 __dlm_lockres_grab_inflight_worker(dlm, res);
1726 spin_unlock(&res->spinlock);
1726 } 1727 }
1727 spin_unlock(&res->spinlock);
1728 } else { 1728 } else {
1729 /* put.. incase we are not the master */ 1729 /* put.. incase we are not the master */
1730 spin_unlock(&res->spinlock); 1730 spin_unlock(&res->spinlock);
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index a23209b43842..1b4d69f68c33 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -116,6 +116,8 @@ struct bdi_writeback {
116 struct list_head work_list; 116 struct list_head work_list;
117 struct delayed_work dwork; /* work item used for writeback */ 117 struct delayed_work dwork; /* work item used for writeback */
118 118
119 struct list_head bdi_node; /* anchored at bdi->wb_list */
120
119#ifdef CONFIG_CGROUP_WRITEBACK 121#ifdef CONFIG_CGROUP_WRITEBACK
120 struct percpu_ref refcnt; /* used only for !root wb's */ 122 struct percpu_ref refcnt; /* used only for !root wb's */
121 struct fprop_local_percpu memcg_completions; 123 struct fprop_local_percpu memcg_completions;
@@ -150,6 +152,7 @@ struct backing_dev_info {
150 atomic_long_t tot_write_bandwidth; 152 atomic_long_t tot_write_bandwidth;
151 153
152 struct bdi_writeback wb; /* the root writeback info for this bdi */ 154 struct bdi_writeback wb; /* the root writeback info for this bdi */
155 struct list_head wb_list; /* list of all wbs */
153#ifdef CONFIG_CGROUP_WRITEBACK 156#ifdef CONFIG_CGROUP_WRITEBACK
154 struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ 157 struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
155 struct rb_root cgwb_congested_tree; /* their congested states */ 158 struct rb_root cgwb_congested_tree; /* their congested states */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index d5eb4ad1c534..c85f74946a8b 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -19,13 +19,17 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20 20
21int __must_check bdi_init(struct backing_dev_info *bdi); 21int __must_check bdi_init(struct backing_dev_info *bdi);
22void bdi_destroy(struct backing_dev_info *bdi); 22void bdi_exit(struct backing_dev_info *bdi);
23 23
24__printf(3, 4) 24__printf(3, 4)
25int bdi_register(struct backing_dev_info *bdi, struct device *parent, 25int bdi_register(struct backing_dev_info *bdi, struct device *parent,
26 const char *fmt, ...); 26 const char *fmt, ...);
27int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); 27int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
28void bdi_unregister(struct backing_dev_info *bdi);
29
28int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); 30int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
31void bdi_destroy(struct backing_dev_info *bdi);
32
29void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, 33void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
30 bool range_cyclic, enum wb_reason reason); 34 bool range_cyclic, enum wb_reason reason);
31void wb_start_background_writeback(struct bdi_writeback *wb); 35void wb_start_background_writeback(struct bdi_writeback *wb);
@@ -408,61 +412,6 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
408 rcu_read_unlock(); 412 rcu_read_unlock();
409} 413}
410 414
411struct wb_iter {
412 int start_memcg_id;
413 struct radix_tree_iter tree_iter;
414 void **slot;
415};
416
417static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
418 struct backing_dev_info *bdi)
419{
420 struct radix_tree_iter *titer = &iter->tree_iter;
421
422 WARN_ON_ONCE(!rcu_read_lock_held());
423
424 if (iter->start_memcg_id >= 0) {
425 iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id);
426 iter->start_memcg_id = -1;
427 } else {
428 iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
429 }
430
431 if (!iter->slot)
432 iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
433 if (iter->slot)
434 return *iter->slot;
435 return NULL;
436}
437
438static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
439 struct backing_dev_info *bdi,
440 int start_memcg_id)
441{
442 iter->start_memcg_id = start_memcg_id;
443
444 if (start_memcg_id)
445 return __wb_iter_next(iter, bdi);
446 else
447 return &bdi->wb;
448}
449
450/**
451 * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order
452 * @wb_cur: cursor struct bdi_writeback pointer
453 * @bdi: bdi to walk wb's of
454 * @iter: pointer to struct wb_iter to be used as iteration buffer
455 * @start_memcg_id: memcg ID to start iteration from
456 *
457 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
458 * memcg ID order starting from @start_memcg_id. @iter is struct wb_iter
459 * to be used as temp storage during iteration. rcu_read_lock() must be
460 * held throughout iteration.
461 */
462#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \
463 for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \
464 (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
465
466#else /* CONFIG_CGROUP_WRITEBACK */ 415#else /* CONFIG_CGROUP_WRITEBACK */
467 416
468static inline bool inode_cgwb_enabled(struct inode *inode) 417static inline bool inode_cgwb_enabled(struct inode *inode)
@@ -522,14 +471,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg)
522{ 471{
523} 472}
524 473
525struct wb_iter {
526 int next_id;
527};
528
529#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \
530 for ((iter)->next_id = (start_blkcg_id); \
531 ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )
532
533static inline int inode_congested(struct inode *inode, int cong_bits) 474static inline int inode_congested(struct inode *inode, int cong_bits)
534{ 475{
535 return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); 476 return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
diff --git a/include/linux/cma.h b/include/linux/cma.h
index f7ef093ec49a..29f9e774ab76 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base,
26extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, 26extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
27 unsigned int order_per_bit, 27 unsigned int order_per_bit,
28 struct cma **res_cma); 28 struct cma **res_cma);
29extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align); 29extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
30extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); 30extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
31#endif 31#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index dfaa7b3e9ae9..8efb40e61d6e 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -237,12 +237,25 @@
237#define KASAN_ABI_VERSION 3 237#define KASAN_ABI_VERSION 3
238#endif 238#endif
239 239
240#if GCC_VERSION >= 40902
241/*
242 * Tell the compiler that address safety instrumentation (KASAN)
243 * should not be applied to that function.
244 * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
245 */
246#define __no_sanitize_address __attribute__((no_sanitize_address))
247#endif
248
240#endif /* gcc version >= 40000 specific checks */ 249#endif /* gcc version >= 40000 specific checks */
241 250
242#if !defined(__noclone) 251#if !defined(__noclone)
243#define __noclone /* not needed */ 252#define __noclone /* not needed */
244#endif 253#endif
245 254
255#if !defined(__no_sanitize_address)
256#define __no_sanitize_address
257#endif
258
246/* 259/*
247 * A trick to suppress uninitialized variable warning without generating any 260 * A trick to suppress uninitialized variable warning without generating any
248 * code 261 * code
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c836eb2dc44d..3d7810341b57 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -198,19 +198,45 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
198 198
199#include <uapi/linux/types.h> 199#include <uapi/linux/types.h>
200 200
201static __always_inline void __read_once_size(const volatile void *p, void *res, int size) 201#define __READ_ONCE_SIZE \
202({ \
203 switch (size) { \
204 case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
205 case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
206 case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
207 case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
208 default: \
209 barrier(); \
210 __builtin_memcpy((void *)res, (const void *)p, size); \
211 barrier(); \
212 } \
213})
214
215static __always_inline
216void __read_once_size(const volatile void *p, void *res, int size)
202{ 217{
203 switch (size) { 218 __READ_ONCE_SIZE;
204 case 1: *(__u8 *)res = *(volatile __u8 *)p; break; 219}
205 case 2: *(__u16 *)res = *(volatile __u16 *)p; break; 220
206 case 4: *(__u32 *)res = *(volatile __u32 *)p; break; 221#ifdef CONFIG_KASAN
207 case 8: *(__u64 *)res = *(volatile __u64 *)p; break; 222/*
208 default: 223 * This function is not 'inline' because __no_sanitize_address conflicts
209 barrier(); 224 * with inlining. Attempting to inline it may cause a build failure.
210 __builtin_memcpy((void *)res, (const void *)p, size); 225 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
211 barrier(); 226 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
212 } 227 */
228static __no_sanitize_address __maybe_unused
229void __read_once_size_nocheck(const volatile void *p, void *res, int size)
230{
231 __READ_ONCE_SIZE;
232}
233#else
234static __always_inline
235void __read_once_size_nocheck(const volatile void *p, void *res, int size)
236{
237 __READ_ONCE_SIZE;
213} 238}
239#endif
214 240
215static __always_inline void __write_once_size(volatile void *p, void *res, int size) 241static __always_inline void __write_once_size(volatile void *p, void *res, int size)
216{ 242{
@@ -248,8 +274,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
248 * required ordering. 274 * required ordering.
249 */ 275 */
250 276
251#define READ_ONCE(x) \ 277#define __READ_ONCE(x, check) \
252 ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) 278({ \
279 union { typeof(x) __val; char __c[1]; } __u; \
280 if (check) \
281 __read_once_size(&(x), __u.__c, sizeof(x)); \
282 else \
283 __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
284 __u.__val; \
285})
286#define READ_ONCE(x) __READ_ONCE(x, 1)
287
288/*
289 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
290 * to hide memory access from KASAN.
291 */
292#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
253 293
254#define WRITE_ONCE(x, val) \ 294#define WRITE_ONCE(x, val) \
255({ \ 295({ \
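
A minimal usage sketch of the new macro pair (only READ_ONCE()/READ_ONCE_NOCHECK() come from this hunk; the frame-walking helper and its bounds check are hypothetical): code such as a stack unwinder that must read possibly-poisoned memory uses the _NOCHECK variant so KASAN does not report it.

#include <linux/compiler.h>

/* Hypothetical frame walker: 'frame' may point at memory KASAN has
 * poisoned (e.g. a dead stack frame), so a plain READ_ONCE() could
 * trigger a false positive while READ_ONCE_NOCHECK() stays silent. */
static unsigned long peek_frame(unsigned long *frame, unsigned long *stack_end)
{
	if (frame >= stack_end)
		return 0;
	return READ_ONCE_NOCHECK(*frame);
}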
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 569bbd039896..fec734df1524 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -111,7 +111,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
111 return ret; 111 return ret;
112} 112}
113 113
114struct page *dma_alloc_from_contiguous(struct device *dev, int count, 114struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
115 unsigned int order); 115 unsigned int order);
116bool dma_release_from_contiguous(struct device *dev, struct page *pages, 116bool dma_release_from_contiguous(struct device *dev, struct page *pages,
117 int count); 117 int count);
@@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
144} 144}
145 145
146static inline 146static inline
147struct page *dma_alloc_from_contiguous(struct device *dev, int count, 147struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
148 unsigned int order) 148 unsigned int order)
149{ 149{
150 return NULL; 150 return NULL;
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6452ff4c463f..3e3318ddfc0e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -676,8 +676,9 @@ enum {
676 676
677struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); 677struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
678struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); 678struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
679void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, 679void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
680 unsigned long *pdirty, unsigned long *pwriteback); 680 unsigned long *pheadroom, unsigned long *pdirty,
681 unsigned long *pwriteback);
681 682
682#else /* CONFIG_CGROUP_WRITEBACK */ 683#else /* CONFIG_CGROUP_WRITEBACK */
683 684
@@ -687,7 +688,8 @@ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
687} 688}
688 689
689static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, 690static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
690 unsigned long *pavail, 691 unsigned long *pfilepages,
692 unsigned long *pheadroom,
691 unsigned long *pdirty, 693 unsigned long *pdirty,
692 unsigned long *pwriteback) 694 unsigned long *pwriteback)
693{ 695{
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index cb1b9bbda332..b36d837c701e 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -64,7 +64,7 @@ struct unix_sock {
64 struct socket_wq peer_wq; 64 struct socket_wq peer_wq;
65}; 65};
66 66
67static inline struct unix_sock *unix_sk(struct sock *sk) 67static inline struct unix_sock *unix_sk(const struct sock *sk)
68{ 68{
69 return (struct unix_sock *)sk; 69 return (struct unix_sock *)sk;
70} 70}
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 186f3a1e1b1f..fc1937698625 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -113,12 +113,12 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
113void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, 113void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
114 bool rearm); 114 bool rearm);
115 115
116static void inline inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo) 116static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
117{ 117{
118 __inet_twsk_schedule(tw, timeo, false); 118 __inet_twsk_schedule(tw, timeo, false);
119} 119}
120 120
121static void inline inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo) 121static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
122{ 122{
123 __inet_twsk_schedule(tw, timeo, true); 123 __inet_twsk_schedule(tw, timeo, true);
124} 124}
diff --git a/include/net/sock.h b/include/net/sock.h
index 7aa78440559a..e23717013a4e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -828,6 +828,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
828 if (sk_rcvqueues_full(sk, limit)) 828 if (sk_rcvqueues_full(sk, limit))
829 return -ENOBUFS; 829 return -ENOBUFS;
830 830
831 /*
832 * If the skb was allocated from pfmemalloc reserves, only
833 * allow SOCK_MEMALLOC sockets to use it as this socket is
834 * helping free memory
835 */
836 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
837 return -ENOMEM;
838
831 __sk_add_backlog(sk, skb); 839 __sk_add_backlog(sk, skb);
832 sk->sk_backlog.len += skb->truesize; 840 sk->sk_backlog.len += skb->truesize;
833 return 0; 841 return 0;
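
As a hedged illustration of the check added above (the wrapper below is not in the tree; skb_pfmemalloc() and sock_flag() are existing helpers): only sockets flagged SOCK_MEMALLOC, e.g. via sk_set_memalloc(), are allowed to consume skbs carved out of the pfmemalloc emergency reserves, and everything else now gets -ENOMEM from sk_add_backlog().

#include <net/sock.h>
#include <linux/skbuff.h>

/* Mirrors the test added to sk_add_backlog(): an skb allocated from
 * emergency reserves may only be queued to a memalloc socket. */
static bool may_backlog_skb(const struct sock *sk, const struct sk_buff *skb)
{
	return !skb_pfmemalloc(skb) || sock_flag(sk, SOCK_MEMALLOC);
}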
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 884e728b09d9..26ede14597da 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -86,7 +86,7 @@
86 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ 86 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
87 SNDRV_CTL_ELEM_ACCESS_READWRITE, \ 87 SNDRV_CTL_ELEM_ACCESS_READWRITE, \
88 .tlv.p = (tlv_array),\ 88 .tlv.p = (tlv_array),\
89 .info = snd_soc_info_volsw, \ 89 .info = snd_soc_info_volsw_sx, \
90 .get = snd_soc_get_volsw_sx,\ 90 .get = snd_soc_get_volsw_sx,\
91 .put = snd_soc_put_volsw_sx, \ 91 .put = snd_soc_put_volsw_sx, \
92 .private_value = (unsigned long)&(struct soc_mixer_control) \ 92 .private_value = (unsigned long)&(struct soc_mixer_control) \
@@ -156,7 +156,7 @@
156 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ 156 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
157 SNDRV_CTL_ELEM_ACCESS_READWRITE, \ 157 SNDRV_CTL_ELEM_ACCESS_READWRITE, \
158 .tlv.p = (tlv_array), \ 158 .tlv.p = (tlv_array), \
159 .info = snd_soc_info_volsw, \ 159 .info = snd_soc_info_volsw_sx, \
160 .get = snd_soc_get_volsw_sx, \ 160 .get = snd_soc_get_volsw_sx, \
161 .put = snd_soc_put_volsw_sx, \ 161 .put = snd_soc_put_volsw_sx, \
162 .private_value = (unsigned long)&(struct soc_mixer_control) \ 162 .private_value = (unsigned long)&(struct soc_mixer_control) \
@@ -574,6 +574,8 @@ int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
574 struct snd_ctl_elem_value *ucontrol); 574 struct snd_ctl_elem_value *ucontrol);
575int snd_soc_info_volsw(struct snd_kcontrol *kcontrol, 575int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
576 struct snd_ctl_elem_info *uinfo); 576 struct snd_ctl_elem_info *uinfo);
577int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol,
578 struct snd_ctl_elem_info *uinfo);
577#define snd_soc_info_bool_ext snd_ctl_boolean_mono_info 579#define snd_soc_info_bool_ext snd_ctl_boolean_mono_info
578int snd_soc_get_volsw(struct snd_kcontrol *kcontrol, 580int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
579 struct snd_ctl_elem_value *ucontrol); 581 struct snd_ctl_elem_value *ucontrol);
diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h
index 898be3a8db9a..6d8f8fba3341 100644
--- a/include/sound/wm8904.h
+++ b/include/sound/wm8904.h
@@ -119,7 +119,7 @@
119#define WM8904_MIC_REGS 2 119#define WM8904_MIC_REGS 2
120#define WM8904_GPIO_REGS 4 120#define WM8904_GPIO_REGS 4
121#define WM8904_DRC_REGS 4 121#define WM8904_DRC_REGS 4
122#define WM8904_EQ_REGS 25 122#define WM8904_EQ_REGS 24
123 123
124/** 124/**
125 * DRC configurations are specified with a label and a set of register 125 * DRC configurations are specified with a label and a set of register
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 32e07d8cbaf4..036f73bc54cd 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -323,10 +323,10 @@ enum ovs_key_attr {
323 OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls. 323 OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls.
324 * The implementation may restrict 324 * The implementation may restrict
325 * the accepted length of the array. */ 325 * the accepted length of the array. */
326 OVS_KEY_ATTR_CT_STATE, /* u8 bitmask of OVS_CS_F_* */ 326 OVS_KEY_ATTR_CT_STATE, /* u32 bitmask of OVS_CS_F_* */
327 OVS_KEY_ATTR_CT_ZONE, /* u16 connection tracking zone. */ 327 OVS_KEY_ATTR_CT_ZONE, /* u16 connection tracking zone. */
328 OVS_KEY_ATTR_CT_MARK, /* u32 connection tracking mark */ 328 OVS_KEY_ATTR_CT_MARK, /* u32 connection tracking mark */
329 OVS_KEY_ATTR_CT_LABEL, /* 16-octet connection tracking label */ 329 OVS_KEY_ATTR_CT_LABELS, /* 16-octet connection tracking label */
330 330
331#ifdef __KERNEL__ 331#ifdef __KERNEL__
332 OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */ 332 OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */
@@ -439,9 +439,9 @@ struct ovs_key_nd {
439 __u8 nd_tll[ETH_ALEN]; 439 __u8 nd_tll[ETH_ALEN];
440}; 440};
441 441
442#define OVS_CT_LABEL_LEN 16 442#define OVS_CT_LABELS_LEN 16
443struct ovs_key_ct_label { 443struct ovs_key_ct_labels {
444 __u8 ct_label[OVS_CT_LABEL_LEN]; 444 __u8 ct_labels[OVS_CT_LABELS_LEN];
445}; 445};
446 446
447/* OVS_KEY_ATTR_CT_STATE flags */ 447/* OVS_KEY_ATTR_CT_STATE flags */
@@ -449,9 +449,9 @@ struct ovs_key_ct_label {
449#define OVS_CS_F_ESTABLISHED 0x02 /* Part of an existing connection. */ 449#define OVS_CS_F_ESTABLISHED 0x02 /* Part of an existing connection. */
450#define OVS_CS_F_RELATED 0x04 /* Related to an established 450#define OVS_CS_F_RELATED 0x04 /* Related to an established
451 * connection. */ 451 * connection. */
452#define OVS_CS_F_INVALID 0x20 /* Could not track connection. */ 452#define OVS_CS_F_REPLY_DIR 0x08 /* Flow is in the reply direction. */
453#define OVS_CS_F_REPLY_DIR 0x40 /* Flow is in the reply direction. */ 453#define OVS_CS_F_INVALID 0x10 /* Could not track connection. */
454#define OVS_CS_F_TRACKED 0x80 /* Conntrack has occurred. */ 454#define OVS_CS_F_TRACKED 0x20 /* Conntrack has occurred. */
455 455
456/** 456/**
457 * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. 457 * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
@@ -618,22 +618,24 @@ struct ovs_action_hash {
618 618
619/** 619/**
620 * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action. 620 * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action.
621 * @OVS_CT_ATTR_FLAGS: u32 connection tracking flags. 621 * @OVS_CT_ATTR_COMMIT: If present, commits the connection to the conntrack
622 * table. This allows future packets for the same connection to be identified
623 * as 'established' or 'related'.
622 * @OVS_CT_ATTR_ZONE: u16 connection tracking zone. 624 * @OVS_CT_ATTR_ZONE: u16 connection tracking zone.
623 * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the 625 * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
624 * mask, the corresponding bit in the value is copied to the connection 626 * mask, the corresponding bit in the value is copied to the connection
625 * tracking mark field in the connection. 627 * tracking mark field in the connection.
626 * @OVS_CT_ATTR_LABEL: %OVS_CT_LABEL_LEN value followed by %OVS_CT_LABEL_LEN 628 * @OVS_CT_ATTR_LABELS: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN
627 * mask. For each bit set in the mask, the corresponding bit in the value is 629 * mask. For each bit set in the mask, the corresponding bit in the value is
628 * copied to the connection tracking label field in the connection. 630 * copied to the connection tracking label field in the connection.
629 * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. 631 * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
630 */ 632 */
631enum ovs_ct_attr { 633enum ovs_ct_attr {
632 OVS_CT_ATTR_UNSPEC, 634 OVS_CT_ATTR_UNSPEC,
633 OVS_CT_ATTR_FLAGS, /* u8 bitmask of OVS_CT_F_*. */ 635 OVS_CT_ATTR_COMMIT, /* No argument, commits connection. */
634 OVS_CT_ATTR_ZONE, /* u16 zone id. */ 636 OVS_CT_ATTR_ZONE, /* u16 zone id. */
635 OVS_CT_ATTR_MARK, /* mark to associate with this connection. */ 637 OVS_CT_ATTR_MARK, /* mark to associate with this connection. */
636 OVS_CT_ATTR_LABEL, /* label to associate with this connection. */ 638 OVS_CT_ATTR_LABELS, /* labels to associate with this connection. */
637 OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of 639 OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of
638 related connections. */ 640 related connections. */
639 __OVS_CT_ATTR_MAX 641 __OVS_CT_ATTR_MAX
@@ -641,14 +643,6 @@ enum ovs_ct_attr {
641 643
642#define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1) 644#define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1)
643 645
644/*
645 * OVS_CT_ATTR_FLAGS flags - bitmask of %OVS_CT_F_*
646 * @OVS_CT_F_COMMIT: Commits the flow to the conntrack table. This allows
647 * future packets for the same connection to be identified as 'established'
648 * or 'related'.
649 */
650#define OVS_CT_F_COMMIT 0x01
651
652/** 646/**
653 * enum ovs_action_attr - Action types. 647 * enum ovs_action_attr - Action types.
654 * 648 *
@@ -705,7 +699,7 @@ enum ovs_action_attr {
705 * data immediately followed by a mask. 699 * data immediately followed by a mask.
706 * The data must be zero for the unmasked 700 * The data must be zero for the unmasked
707 * bits. */ 701 * bits. */
708 OVS_ACTION_ATTR_CT, /* One nested OVS_CT_ATTR_* . */ 702 OVS_ACTION_ATTR_CT, /* Nested OVS_CT_ATTR_* . */
709 703
710 __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted 704 __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
711 * from userspace. */ 705 * from userspace. */
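
A sketch of how the reworked conntrack action nests on the kernel side (the encoder function is hypothetical; nla_nest_start(), nla_put_flag(), nla_put_u16() and nla_nest_end() are the stock netlink attribute helpers): the old u32 OVS_CT_ATTR_FLAGS carrying OVS_CT_F_COMMIT becomes a bare flag attribute.

#include <linux/errno.h>
#include <net/netlink.h>
#include <linux/openvswitch.h>

/* Hypothetical encoder: emit OVS_ACTION_ATTR_CT carrying a commit flag
 * and a zone, matching the new attribute layout. */
static int put_ct_commit_action(struct sk_buff *skb, u16 zone)
{
	struct nlattr *start = nla_nest_start(skb, OVS_ACTION_ATTR_CT);

	if (!start)
		return -EMSGSIZE;
	if (nla_put_flag(skb, OVS_CT_ATTR_COMMIT) ||
	    nla_put_u16(skb, OVS_CT_ATTR_ZONE, zone)) {
		nla_nest_cancel(skb, start);
		return -EMSGSIZE;
	}
	nla_nest_end(skb, start);
	return 0;
}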
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 702024769c74..9d8f5d10c1e5 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -160,7 +160,7 @@ struct rtattr {
160 160
161/* Macros to handle rtattributes */ 161/* Macros to handle rtattributes */
162 162
163#define RTA_ALIGNTO 4 163#define RTA_ALIGNTO 4U
164#define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) ) 164#define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) )
165#define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \ 165#define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \
166 (rta)->rta_len >= sizeof(struct rtattr) && \ 166 (rta)->rta_len >= sizeof(struct rtattr) && \
diff --git a/kernel/kmod.c b/kernel/kmod.c
index da98d0593de2..0277d1216f80 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -327,9 +327,13 @@ static void call_usermodehelper_exec_work(struct work_struct *work)
327 call_usermodehelper_exec_sync(sub_info); 327 call_usermodehelper_exec_sync(sub_info);
328 } else { 328 } else {
329 pid_t pid; 329 pid_t pid;
330 330 /*
331 * Use CLONE_PARENT to reparent it to kthreadd; we do not
332 * want to pollute current->children, and we need a parent
333 * that always ignores SIGCHLD to ensure auto-reaping.
334 */
331 pid = kernel_thread(call_usermodehelper_exec_async, sub_info, 335 pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
332 SIGCHLD); 336 CLONE_PARENT | SIGCHLD);
333 if (pid < 0) { 337 if (pid < 0) {
334 sub_info->retval = pid; 338 sub_info->retval = pid;
335 umh_complete(sub_info); 339 umh_complete(sub_info);
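
A userspace illustration (plain C, not kernel code) of the auto-reap property the new comment relies on: when the effective parent ignores SIGCHLD, exited children are reaped automatically and never linger as zombies, which is what reparenting the helper onto kthreadd provides in the kernel case.

#include <signal.h>
#include <sys/types.h>
#include <unistd.h>

/* Illustrative only: a parent with SIGCHLD set to SIG_IGN never has to
 * wait() for its children; the kernel reaps them on exit. */
pid_t spawn_autoreaped(void)
{
	pid_t pid;

	signal(SIGCHLD, SIG_IGN);
	pid = fork();
	if (pid == 0)
		_exit(0);	/* child exits and is reaped immediately */
	return pid;
}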
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 10a8faa1b0d4..bcd214e4b4d6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2366,8 +2366,15 @@ void wake_up_new_task(struct task_struct *p)
2366 trace_sched_wakeup_new(p); 2366 trace_sched_wakeup_new(p);
2367 check_preempt_curr(rq, p, WF_FORK); 2367 check_preempt_curr(rq, p, WF_FORK);
2368#ifdef CONFIG_SMP 2368#ifdef CONFIG_SMP
2369 if (p->sched_class->task_woken) 2369 if (p->sched_class->task_woken) {
2370 /*
2371 * Nothing relies on rq->lock after this, so it's fine to
2372 * drop it.
2373 */
2374 lockdep_unpin_lock(&rq->lock);
2370 p->sched_class->task_woken(rq, p); 2375 p->sched_class->task_woken(rq, p);
2376 lockdep_pin_lock(&rq->lock);
2377 }
2371#endif 2378#endif
2372 task_rq_unlock(rq, p, &flags); 2379 task_rq_unlock(rq, p, &flags);
2373} 2380}
@@ -7238,9 +7245,6 @@ void __init sched_init_smp(void)
7238 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); 7245 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
7239 alloc_cpumask_var(&fallback_doms, GFP_KERNEL); 7246 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
7240 7247
7241 /* nohz_full won't take effect without isolating the cpus. */
7242 tick_nohz_full_add_cpus_to(cpu_isolated_map);
7243
7244 sched_init_numa(); 7248 sched_init_numa();
7245 7249
7246 /* 7250 /*
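
The lockdep_unpin_lock()/lockdep_pin_lock() bracket added around task_woken() above (and around push_dl_task() in the next hunk) follows one pattern; a sketch of it, assuming it lives next to this code in kernel/sched/ where struct rq and the lockdep helpers are visible, with a hypothetical wrapper name: any callback that may drop and re-acquire rq->lock must be unpinned first and re-pinned afterwards, otherwise lockdep's lock-pinning check complains.

/* Hypothetical wrapper around a callback that is allowed to release
 * rq->lock internally while the caller holds it pinned. */
static void run_unlocked_callback(struct rq *rq, struct task_struct *p,
				  void (*cb)(struct rq *, struct task_struct *))
{
	lockdep_assert_held(&rq->lock);

	lockdep_unpin_lock(&rq->lock);	/* cb may unlock/relock rq->lock */
	cb(rq, p);
	lockdep_pin_lock(&rq->lock);
}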
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index fc8f01083527..8b0a15e285f9 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -668,8 +668,15 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
668 * Queueing this task back might have overloaded rq, check if we need 668 * Queueing this task back might have overloaded rq, check if we need
669 * to kick someone away. 669 * to kick someone away.
670 */ 670 */
671 if (has_pushable_dl_tasks(rq)) 671 if (has_pushable_dl_tasks(rq)) {
672 /*
673 * Nothing relies on rq->lock after this, so it's safe to drop
674 * rq->lock.
675 */
676 lockdep_unpin_lock(&rq->lock);
672 push_dl_task(rq); 677 push_dl_task(rq);
678 lockdep_pin_lock(&rq->lock);
679 }
673#endif 680#endif
674 681
675unlock: 682unlock:
@@ -1066,8 +1073,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1066 int target = find_later_rq(p); 1073 int target = find_later_rq(p);
1067 1074
1068 if (target != -1 && 1075 if (target != -1 &&
1069 dl_time_before(p->dl.deadline, 1076 (dl_time_before(p->dl.deadline,
1070 cpu_rq(target)->dl.earliest_dl.curr)) 1077 cpu_rq(target)->dl.earliest_dl.curr) ||
1078 (cpu_rq(target)->dl.dl_nr_running == 0)))
1071 cpu = target; 1079 cpu = target;
1072 } 1080 }
1073 rcu_read_unlock(); 1081 rcu_read_unlock();
@@ -1417,7 +1425,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1417 1425
1418 later_rq = cpu_rq(cpu); 1426 later_rq = cpu_rq(cpu);
1419 1427
1420 if (!dl_time_before(task->dl.deadline, 1428 if (later_rq->dl.dl_nr_running &&
1429 !dl_time_before(task->dl.deadline,
1421 later_rq->dl.earliest_dl.curr)) { 1430 later_rq->dl.earliest_dl.curr)) {
1422 /* 1431 /*
1423 * Target rq has tasks of equal or earlier deadline, 1432 * Target rq has tasks of equal or earlier deadline,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6e2e3483b1ec..9a5e60fe721a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2363,7 +2363,7 @@ static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2363 */ 2363 */
2364 tg_weight = atomic_long_read(&tg->load_avg); 2364 tg_weight = atomic_long_read(&tg->load_avg);
2365 tg_weight -= cfs_rq->tg_load_avg_contrib; 2365 tg_weight -= cfs_rq->tg_load_avg_contrib;
2366 tg_weight += cfs_rq_load_avg(cfs_rq); 2366 tg_weight += cfs_rq->load.weight;
2367 2367
2368 return tg_weight; 2368 return tg_weight;
2369} 2369}
@@ -2373,7 +2373,7 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2373 long tg_weight, load, shares; 2373 long tg_weight, load, shares;
2374 2374
2375 tg_weight = calc_tg_weight(tg, cfs_rq); 2375 tg_weight = calc_tg_weight(tg, cfs_rq);
2376 load = cfs_rq_load_avg(cfs_rq); 2376 load = cfs_rq->load.weight;
2377 2377
2378 shares = (tg->shares * load); 2378 shares = (tg->shares * load);
2379 if (tg_weight) 2379 if (tg_weight)
@@ -2664,13 +2664,14 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2664/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */ 2664/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
2665static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) 2665static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
2666{ 2666{
2667 int decayed;
2668 struct sched_avg *sa = &cfs_rq->avg; 2667 struct sched_avg *sa = &cfs_rq->avg;
2668 int decayed, removed = 0;
2669 2669
2670 if (atomic_long_read(&cfs_rq->removed_load_avg)) { 2670 if (atomic_long_read(&cfs_rq->removed_load_avg)) {
2671 long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0); 2671 long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
2672 sa->load_avg = max_t(long, sa->load_avg - r, 0); 2672 sa->load_avg = max_t(long, sa->load_avg - r, 0);
2673 sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0); 2673 sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
2674 removed = 1;
2674 } 2675 }
2675 2676
2676 if (atomic_long_read(&cfs_rq->removed_util_avg)) { 2677 if (atomic_long_read(&cfs_rq->removed_util_avg)) {
@@ -2688,7 +2689,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
2688 cfs_rq->load_last_update_time_copy = sa->last_update_time; 2689 cfs_rq->load_last_update_time_copy = sa->last_update_time;
2689#endif 2690#endif
2690 2691
2691 return decayed; 2692 return decayed || removed;
2692} 2693}
2693 2694
2694/* Update task and its cfs_rq load average */ 2695/* Update task and its cfs_rq load average */
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8f177c73ae19..4a2ef5a02fd3 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -57,9 +57,11 @@ static inline int cpu_idle_poll(void)
57 rcu_idle_enter(); 57 rcu_idle_enter();
58 trace_cpu_idle_rcuidle(0, smp_processor_id()); 58 trace_cpu_idle_rcuidle(0, smp_processor_id());
59 local_irq_enable(); 59 local_irq_enable();
60 stop_critical_timings();
60 while (!tif_need_resched() && 61 while (!tif_need_resched() &&
61 (cpu_idle_force_poll || tick_check_broadcast_expired())) 62 (cpu_idle_force_poll || tick_check_broadcast_expired()))
62 cpu_relax(); 63 cpu_relax();
64 start_critical_timings();
63 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 65 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
64 rcu_idle_exit(); 66 rcu_idle_exit();
65 return 1; 67 return 1;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index b746399ab59c..8abf1ba18085 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -85,9 +85,19 @@ check_stack(unsigned long ip, unsigned long *stack)
85 if (!object_is_on_stack(stack)) 85 if (!object_is_on_stack(stack))
86 return; 86 return;
87 87
88 /* Can't do this from NMI context (can cause deadlocks) */
89 if (in_nmi())
90 return;
91
88 local_irq_save(flags); 92 local_irq_save(flags);
89 arch_spin_lock(&max_stack_lock); 93 arch_spin_lock(&max_stack_lock);
90 94
95 /*
96 * RCU may not be watching, make it see us.
97 * The stack trace code uses rcu_sched.
98 */
99 rcu_irq_enter();
100
91 /* In case another CPU set the tracer_frame on us */ 101 /* In case another CPU set the tracer_frame on us */
92 if (unlikely(!frame_size)) 102 if (unlikely(!frame_size))
93 this_size -= tracer_frame; 103 this_size -= tracer_frame;
@@ -169,6 +179,7 @@ check_stack(unsigned long ip, unsigned long *stack)
169 } 179 }
170 180
171 out: 181 out:
182 rcu_irq_exit();
172 arch_spin_unlock(&max_stack_lock); 183 arch_spin_unlock(&max_stack_lock);
173 local_irq_restore(flags); 184 local_irq_restore(flags);
174} 185}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ab76b99adc85..1d1521c26302 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -197,6 +197,7 @@ config ENABLE_MUST_CHECK
197config FRAME_WARN 197config FRAME_WARN
198 int "Warn for stack frames larger than (needs gcc 4.4)" 198 int "Warn for stack frames larger than (needs gcc 4.4)"
199 range 0 8192 199 range 0 8192
200 default 0 if KASAN
200 default 1024 if !64BIT 201 default 1024 if !64BIT
201 default 2048 if 64BIT 202 default 2048 if 64BIT
202 help 203 help
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index f1cdeb024d17..6a823a53e357 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -44,7 +44,7 @@ static void fail_dump(struct fault_attr *attr)
44 printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n" 44 printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
45 "name %pd, interval %lu, probability %lu, " 45 "name %pd, interval %lu, probability %lu, "
46 "space %d, times %d\n", attr->dname, 46 "space %d, times %d\n", attr->dname,
47 attr->probability, attr->interval, 47 attr->interval, attr->probability,
48 atomic_read(&attr->space), 48 atomic_read(&attr->space),
49 atomic_read(&attr->times)); 49 atomic_read(&attr->times));
50 if (attr->verbose > 1) 50 if (attr->verbose > 1)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 2df8ddcb0ca0..619984fc07ec 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -480,6 +480,10 @@ static void cgwb_release_workfn(struct work_struct *work)
480 release_work); 480 release_work);
481 struct backing_dev_info *bdi = wb->bdi; 481 struct backing_dev_info *bdi = wb->bdi;
482 482
483 spin_lock_irq(&cgwb_lock);
484 list_del_rcu(&wb->bdi_node);
485 spin_unlock_irq(&cgwb_lock);
486
483 wb_shutdown(wb); 487 wb_shutdown(wb);
484 488
485 css_put(wb->memcg_css); 489 css_put(wb->memcg_css);
@@ -575,6 +579,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
575 ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb); 579 ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
576 if (!ret) { 580 if (!ret) {
577 atomic_inc(&bdi->usage_cnt); 581 atomic_inc(&bdi->usage_cnt);
582 list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
578 list_add(&wb->memcg_node, memcg_cgwb_list); 583 list_add(&wb->memcg_node, memcg_cgwb_list);
579 list_add(&wb->blkcg_node, blkcg_cgwb_list); 584 list_add(&wb->blkcg_node, blkcg_cgwb_list);
580 css_get(memcg_css); 585 css_get(memcg_css);
@@ -676,7 +681,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
676static void cgwb_bdi_destroy(struct backing_dev_info *bdi) 681static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
677{ 682{
678 struct radix_tree_iter iter; 683 struct radix_tree_iter iter;
679 struct bdi_writeback_congested *congested, *congested_n; 684 struct rb_node *rbn;
680 void **slot; 685 void **slot;
681 686
682 WARN_ON(test_bit(WB_registered, &bdi->wb.state)); 687 WARN_ON(test_bit(WB_registered, &bdi->wb.state));
@@ -686,9 +691,11 @@ static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
686 radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) 691 radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
687 cgwb_kill(*slot); 692 cgwb_kill(*slot);
688 693
689 rbtree_postorder_for_each_entry_safe(congested, congested_n, 694 while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
690 &bdi->cgwb_congested_tree, rb_node) { 695 struct bdi_writeback_congested *congested =
691 rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree); 696 rb_entry(rbn, struct bdi_writeback_congested, rb_node);
697
698 rb_erase(rbn, &bdi->cgwb_congested_tree);
692 congested->bdi = NULL; /* mark @congested unlinked */ 699 congested->bdi = NULL; /* mark @congested unlinked */
693 } 700 }
694 701
@@ -764,15 +771,22 @@ static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
764 771
765int bdi_init(struct backing_dev_info *bdi) 772int bdi_init(struct backing_dev_info *bdi)
766{ 773{
774 int ret;
775
767 bdi->dev = NULL; 776 bdi->dev = NULL;
768 777
769 bdi->min_ratio = 0; 778 bdi->min_ratio = 0;
770 bdi->max_ratio = 100; 779 bdi->max_ratio = 100;
771 bdi->max_prop_frac = FPROP_FRAC_BASE; 780 bdi->max_prop_frac = FPROP_FRAC_BASE;
772 INIT_LIST_HEAD(&bdi->bdi_list); 781 INIT_LIST_HEAD(&bdi->bdi_list);
782 INIT_LIST_HEAD(&bdi->wb_list);
773 init_waitqueue_head(&bdi->wb_waitq); 783 init_waitqueue_head(&bdi->wb_waitq);
774 784
775 return cgwb_bdi_init(bdi); 785 ret = cgwb_bdi_init(bdi);
786
787 list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
788
789 return ret;
776} 790}
777EXPORT_SYMBOL(bdi_init); 791EXPORT_SYMBOL(bdi_init);
778 792
@@ -823,7 +837,7 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi)
823 synchronize_rcu_expedited(); 837 synchronize_rcu_expedited();
824} 838}
825 839
826void bdi_destroy(struct backing_dev_info *bdi) 840void bdi_unregister(struct backing_dev_info *bdi)
827{ 841{
828 /* make sure nobody finds us on the bdi_list anymore */ 842 /* make sure nobody finds us on the bdi_list anymore */
829 bdi_remove_from_list(bdi); 843 bdi_remove_from_list(bdi);
@@ -835,9 +849,19 @@ void bdi_destroy(struct backing_dev_info *bdi)
835 device_unregister(bdi->dev); 849 device_unregister(bdi->dev);
836 bdi->dev = NULL; 850 bdi->dev = NULL;
837 } 851 }
852}
838 853
854void bdi_exit(struct backing_dev_info *bdi)
855{
856 WARN_ON_ONCE(bdi->dev);
839 wb_exit(&bdi->wb); 857 wb_exit(&bdi->wb);
840} 858}
859
860void bdi_destroy(struct backing_dev_info *bdi)
861{
862 bdi_unregister(bdi);
863 bdi_exit(bdi);
864}
841EXPORT_SYMBOL(bdi_destroy); 865EXPORT_SYMBOL(bdi_destroy);
842 866
843/* 867/*
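
The cgwb_bdi_destroy() change above follows a general pattern worth spelling out; a self-contained sketch (the item type and the kfree() are illustrative): because rb_erase() rebalances the tree, postorder iteration is unsafe once nodes are being erased, while repeatedly taking rb_first() is.

#include <linux/rbtree.h>
#include <linux/slab.h>

struct item {
	struct rb_node node;
	int key;
};

/* Drain and free every node; safe even though rb_erase() rebalances. */
static void drain_tree(struct rb_root *root)
{
	struct rb_node *rbn;

	while ((rbn = rb_first(root))) {
		struct item *it = rb_entry(rbn, struct item, node);

		rb_erase(rbn, root);
		kfree(it);	/* assumes items were kmalloc()ed */
	}
}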
diff --git a/mm/cma.c b/mm/cma.c
index e7d1db533025..4eb56badf37e 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -361,7 +361,7 @@ err:
361 * This function allocates part of contiguous memory on specific 361 * This function allocates part of contiguous memory on specific
362 * contiguous memory area. 362 * contiguous memory area.
363 */ 363 */
364struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align) 364struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
365{ 365{
366 unsigned long mask, offset, pfn, start = 0; 366 unsigned long mask, offset, pfn, start = 0;
367 unsigned long bitmap_maxno, bitmap_no, bitmap_count; 367 unsigned long bitmap_maxno, bitmap_no, bitmap_count;
@@ -371,7 +371,7 @@ struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
371 if (!cma || !cma->count) 371 if (!cma || !cma->count)
372 return NULL; 372 return NULL;
373 373
374 pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma, 374 pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
375 count, align); 375 count, align);
376 376
377 if (!count) 377 if (!count)
diff --git a/mm/filemap.c b/mm/filemap.c
index 1cc5467cf36c..327910c2400c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2488,6 +2488,11 @@ again:
2488 break; 2488 break;
2489 } 2489 }
2490 2490
2491 if (fatal_signal_pending(current)) {
2492 status = -EINTR;
2493 break;
2494 }
2495
2491 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 2496 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2492 &page, &fsdata); 2497 &page, &fsdata);
2493 if (unlikely(status < 0)) 2498 if (unlikely(status < 0))
@@ -2525,10 +2530,6 @@ again:
2525 written += copied; 2530 written += copied;
2526 2531
2527 balance_dirty_pages_ratelimited(mapping); 2532 balance_dirty_pages_ratelimited(mapping);
2528 if (fatal_signal_pending(current)) {
2529 status = -EINTR;
2530 break;
2531 }
2532 } while (iov_iter_count(i)); 2533 } while (iov_iter_count(i));
2533 2534
2534 return written ? written : status; 2535 return written ? written : status;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4b06b8db9df2..bbac913f96bc 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2206,7 +2206,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
2206 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; 2206 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
2207 _pte++, address += PAGE_SIZE) { 2207 _pte++, address += PAGE_SIZE) {
2208 pte_t pteval = *_pte; 2208 pte_t pteval = *_pte;
2209 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { 2209 if (pte_none(pteval) || (pte_present(pteval) &&
2210 is_zero_pfn(pte_pfn(pteval)))) {
2210 if (!userfaultfd_armed(vma) && 2211 if (!userfaultfd_armed(vma) &&
2211 ++none_or_zero <= khugepaged_max_ptes_none) 2212 ++none_or_zero <= khugepaged_max_ptes_none)
2212 continue; 2213 continue;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d9b5c817dce8..c57c4423c688 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3741,44 +3741,43 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3741/** 3741/**
3742 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3742 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3743 * @wb: bdi_writeback in question 3743 * @wb: bdi_writeback in question
3744 * @pavail: out parameter for number of available pages 3744 * @pfilepages: out parameter for number of file pages
3745 * @pheadroom: out parameter for number of allocatable pages according to memcg
3745 * @pdirty: out parameter for number of dirty pages 3746 * @pdirty: out parameter for number of dirty pages
3746 * @pwriteback: out parameter for number of pages under writeback 3747 * @pwriteback: out parameter for number of pages under writeback
3747 * 3748 *
3748 * Determine the numbers of available, dirty, and writeback pages in @wb's 3749 * Determine the numbers of file, headroom, dirty, and writeback pages in
3749 * memcg. Dirty and writeback are self-explanatory. Available is a bit 3750 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3750 * more involved. 3751 * is a bit more involved.
3751 * 3752 *
3752 * A memcg's headroom is "min(max, high) - used". The available memory is 3753 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3753 * calculated as the lowest headroom of itself and the ancestors plus the 3754 * headroom is calculated as the lowest headroom of itself and the
3754 * number of pages already being used for file pages. Note that this 3755 * ancestors. Note that this doesn't consider the actual amount of
3755 * doesn't consider the actual amount of available memory in the system. 3756 * available memory in the system. The caller should further cap
3756 * The caller should further cap *@pavail accordingly. 3757 * *@pheadroom accordingly.
3757 */ 3758 */
3758void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, 3759void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3759 unsigned long *pdirty, unsigned long *pwriteback) 3760 unsigned long *pheadroom, unsigned long *pdirty,
3761 unsigned long *pwriteback)
3760{ 3762{
3761 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3763 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3762 struct mem_cgroup *parent; 3764 struct mem_cgroup *parent;
3763 unsigned long head_room = PAGE_COUNTER_MAX;
3764 unsigned long file_pages;
3765 3765
3766 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY); 3766 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3767 3767
3768 /* this should eventually include NR_UNSTABLE_NFS */ 3768 /* this should eventually include NR_UNSTABLE_NFS */
3769 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); 3769 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3770 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3771 (1 << LRU_ACTIVE_FILE));
3772 *pheadroom = PAGE_COUNTER_MAX;
3770 3773
3771 file_pages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3772 (1 << LRU_ACTIVE_FILE));
3773 while ((parent = parent_mem_cgroup(memcg))) { 3774 while ((parent = parent_mem_cgroup(memcg))) {
3774 unsigned long ceiling = min(memcg->memory.limit, memcg->high); 3775 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3775 unsigned long used = page_counter_read(&memcg->memory); 3776 unsigned long used = page_counter_read(&memcg->memory);
3776 3777
3777 head_room = min(head_room, ceiling - min(ceiling, used)); 3778 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3778 memcg = parent; 3779 memcg = parent;
3779 } 3780 }
3780
3781 *pavail = file_pages + head_room;
3782} 3781}
3783 3782
3784#else /* CONFIG_CGROUP_WRITEBACK */ 3783#else /* CONFIG_CGROUP_WRITEBACK */
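
A worked example of the new headroom walk in mem_cgroup_wb_stats(), with made-up numbers (the ancestor here is an intermediate cgroup below the root, since the loop stops before the root's own counters are consulted):

/* Illustrative numbers only:
 *   memcg:    limit = 2000 pages, high = 1500, used = 1400
 *             ceiling = min(2000, 1500) = 1500, headroom = 1500 - 1400 = 100
 *   ancestor: limit = 10000, high = PAGE_COUNTER_MAX, used = 5000
 *             ceiling = 10000, headroom = 10000 - 5000 = 5000
 * *pheadroom = min(PAGE_COUNTER_MAX, 100, 5000) = 100, i.e. the most
 * constrained level of the hierarchy (here the memcg itself) bounds how
 * many more pages may be allocated.
 */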
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0a931cdd4f6b..2c90357c34ea 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -145,9 +145,6 @@ struct dirty_throttle_control {
145 unsigned long pos_ratio; 145 unsigned long pos_ratio;
146}; 146};
147 147
148#define DTC_INIT_COMMON(__wb) .wb = (__wb), \
149 .wb_completions = &(__wb)->completions
150
151/* 148/*
152 * Length of period for aging writeout fractions of bdis. This is an 149 * Length of period for aging writeout fractions of bdis. This is an
153 * arbitrarily chosen number. The longer the period, the slower fractions will 150 * arbitrarily chosen number. The longer the period, the slower fractions will
@@ -157,12 +154,16 @@ struct dirty_throttle_control {
157 154
158#ifdef CONFIG_CGROUP_WRITEBACK 155#ifdef CONFIG_CGROUP_WRITEBACK
159 156
160#define GDTC_INIT(__wb) .dom = &global_wb_domain, \ 157#define GDTC_INIT(__wb) .wb = (__wb), \
161 DTC_INIT_COMMON(__wb) 158 .dom = &global_wb_domain, \
159 .wb_completions = &(__wb)->completions
160
162#define GDTC_INIT_NO_WB .dom = &global_wb_domain 161#define GDTC_INIT_NO_WB .dom = &global_wb_domain
163#define MDTC_INIT(__wb, __gdtc) .dom = mem_cgroup_wb_domain(__wb), \ 162
164 .gdtc = __gdtc, \ 163#define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \
165 DTC_INIT_COMMON(__wb) 164 .dom = mem_cgroup_wb_domain(__wb), \
165 .wb_completions = &(__wb)->memcg_completions, \
166 .gdtc = __gdtc
166 167
167static bool mdtc_valid(struct dirty_throttle_control *dtc) 168static bool mdtc_valid(struct dirty_throttle_control *dtc)
168{ 169{
@@ -213,7 +214,8 @@ static void wb_min_max_ratio(struct bdi_writeback *wb,
213 214
214#else /* CONFIG_CGROUP_WRITEBACK */ 215#else /* CONFIG_CGROUP_WRITEBACK */
215 216
216#define GDTC_INIT(__wb) DTC_INIT_COMMON(__wb) 217#define GDTC_INIT(__wb) .wb = (__wb), \
218 .wb_completions = &(__wb)->completions
217#define GDTC_INIT_NO_WB 219#define GDTC_INIT_NO_WB
218#define MDTC_INIT(__wb, __gdtc) 220#define MDTC_INIT(__wb, __gdtc)
219 221
@@ -682,13 +684,19 @@ static unsigned long hard_dirty_limit(struct wb_domain *dom,
682 return max(thresh, dom->dirty_limit); 684 return max(thresh, dom->dirty_limit);
683} 685}
684 686
685/* memory available to a memcg domain is capped by system-wide clean memory */ 687/*
686static void mdtc_cap_avail(struct dirty_throttle_control *mdtc) 688 * Memory which can be further allocated to a memcg domain is capped by
689 * system-wide clean memory excluding the amount being used in the domain.
690 */
691static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
692 unsigned long filepages, unsigned long headroom)
687{ 693{
688 struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc); 694 struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
689 unsigned long clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); 695 unsigned long clean = filepages - min(filepages, mdtc->dirty);
696 unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
697 unsigned long other_clean = global_clean - min(global_clean, clean);
690 698
691 mdtc->avail = min(mdtc->avail, clean); 699 mdtc->avail = filepages + min(headroom, other_clean);
692} 700}
693 701
694/** 702/**
@@ -1562,16 +1570,16 @@ static void balance_dirty_pages(struct address_space *mapping,
1562 } 1570 }
1563 1571
1564 if (mdtc) { 1572 if (mdtc) {
1565 unsigned long writeback; 1573 unsigned long filepages, headroom, writeback;
1566 1574
1567 /* 1575 /*
1568 * If @wb belongs to !root memcg, repeat the same 1576 * If @wb belongs to !root memcg, repeat the same
1569 * basic calculations for the memcg domain. 1577 * basic calculations for the memcg domain.
1570 */ 1578 */
1571 mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty, 1579 mem_cgroup_wb_stats(wb, &filepages, &headroom,
1572 &writeback); 1580 &mdtc->dirty, &writeback);
1573 mdtc_cap_avail(mdtc);
1574 mdtc->dirty += writeback; 1581 mdtc->dirty += writeback;
1582 mdtc_calc_avail(mdtc, filepages, headroom);
1575 1583
1576 domain_dirty_limits(mdtc); 1584 domain_dirty_limits(mdtc);
1577 1585
@@ -1893,10 +1901,11 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
1893 return true; 1901 return true;
1894 1902
1895 if (mdtc) { 1903 if (mdtc) {
1896 unsigned long writeback; 1904 unsigned long filepages, headroom, writeback;
1897 1905
1898 mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty, &writeback); 1906 mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
1899 mdtc_cap_avail(mdtc); 1907 &writeback);
1908 mdtc_calc_avail(mdtc, filepages, headroom);
1900 domain_dirty_limits(mdtc); /* ditto, ignore writeback */ 1909 domain_dirty_limits(mdtc); /* ditto, ignore writeback */
1901 1910
1902 if (mdtc->dirty > mdtc->bg_thresh) 1911 if (mdtc->dirty > mdtc->bg_thresh)
@@ -1956,7 +1965,6 @@ void laptop_mode_timer_fn(unsigned long data)
1956 int nr_pages = global_page_state(NR_FILE_DIRTY) + 1965 int nr_pages = global_page_state(NR_FILE_DIRTY) +
1957 global_page_state(NR_UNSTABLE_NFS); 1966 global_page_state(NR_UNSTABLE_NFS);
1958 struct bdi_writeback *wb; 1967 struct bdi_writeback *wb;
1959 struct wb_iter iter;
1960 1968
1961 /* 1969 /*
1962 * We want to write everything out, not just down to the dirty 1970 * We want to write everything out, not just down to the dirty
@@ -1965,10 +1973,12 @@ void laptop_mode_timer_fn(unsigned long data)
1965 if (!bdi_has_dirty_io(&q->backing_dev_info)) 1973 if (!bdi_has_dirty_io(&q->backing_dev_info))
1966 return; 1974 return;
1967 1975
1968 bdi_for_each_wb(wb, &q->backing_dev_info, &iter, 0) 1976 rcu_read_lock();
1977 list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node)
1969 if (wb_has_dirty_io(wb)) 1978 if (wb_has_dirty_io(wb))
1970 wb_start_writeback(wb, nr_pages, true, 1979 wb_start_writeback(wb, nr_pages, true,
1971 WB_REASON_LAPTOP_TIMER); 1980 WB_REASON_LAPTOP_TIMER);
1981 rcu_read_unlock();
1972} 1982}
1973 1983
1974/* 1984/*
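
A worked example of mdtc_calc_avail() from the hunk above, with made-up numbers, showing how the memcg domain's writable budget is capped by clean memory outside the domain:

/* Illustrative numbers only:
 *   filepages = 1000, headroom = 400, mdtc->dirty = 100
 *   gdtc->avail = 5000, gdtc->dirty = 3000
 *   clean        = 1000 - min(1000, 100)  =  900   (clean file pages in the domain)
 *   global_clean = 5000 - min(5000, 3000) = 2000   (clean pages system-wide)
 *   other_clean  = 2000 - min(2000, 900)  = 1100   (clean pages outside the domain)
 *   mdtc->avail  = 1000 + min(400, 1100)  = 1400
 * so the domain may grow only into headroom that is actually backed by
 * clean memory elsewhere in the system.
 */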
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b4548c739a64..2dda439c8cb8 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -91,10 +91,50 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
91 * autoconnect action, remove them completely. If they are, just unmark 91 * autoconnect action, remove them completely. If they are, just unmark
92 * them as waiting for connection, by clearing explicit_connect field. 92 * them as waiting for connection, by clearing explicit_connect field.
93 */ 93 */
94 if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT) 94 params->explicit_connect = false;
95
96 list_del_init(&params->action);
97
98 switch (params->auto_connect) {
99 case HCI_AUTO_CONN_EXPLICIT:
95 hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type); 100 hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
96 else 101 /* return instead of break to avoid duplicate scan update */
97 params->explicit_connect = false; 102 return;
103 case HCI_AUTO_CONN_DIRECT:
104 case HCI_AUTO_CONN_ALWAYS:
105 list_add(&params->action, &conn->hdev->pend_le_conns);
106 break;
107 case HCI_AUTO_CONN_REPORT:
108 list_add(&params->action, &conn->hdev->pend_le_reports);
109 break;
110 default:
111 break;
112 }
113
114 hci_update_background_scan(conn->hdev);
115}
116
117static void hci_conn_cleanup(struct hci_conn *conn)
118{
119 struct hci_dev *hdev = conn->hdev;
120
121 if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
122 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
123
124 hci_chan_list_flush(conn);
125
126 hci_conn_hash_del(hdev, conn);
127
128 if (hdev->notify)
129 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
130
131 hci_conn_del_sysfs(conn);
132
133 debugfs_remove_recursive(conn->debugfs);
134
135 hci_dev_put(hdev);
136
137 hci_conn_put(conn);
98} 138}
99 139
100/* This function requires the caller holds hdev->lock */ 140/* This function requires the caller holds hdev->lock */
@@ -102,8 +142,13 @@ static void hci_connect_le_scan_remove(struct hci_conn *conn)
102{ 142{
103 hci_connect_le_scan_cleanup(conn); 143 hci_connect_le_scan_cleanup(conn);
104 144
105 hci_conn_hash_del(conn->hdev, conn); 145 /* We can't call hci_conn_del here since that would deadlock
106 hci_update_background_scan(conn->hdev); 146 * with trying to call cancel_delayed_work_sync(&conn->disc_work).
147 * Instead, just call hci_conn_cleanup(), which contains the bare
148 * minimum cleanup operations needed for a connection in this
149 * state.
150 */
151 hci_conn_cleanup(conn);
107} 152}
108 153
109static void hci_acl_create_connection(struct hci_conn *conn) 154static void hci_acl_create_connection(struct hci_conn *conn)
@@ -581,27 +626,17 @@ int hci_conn_del(struct hci_conn *conn)
581 } 626 }
582 } 627 }
583 628
584 hci_chan_list_flush(conn);
585
586 if (conn->amp_mgr) 629 if (conn->amp_mgr)
587 amp_mgr_put(conn->amp_mgr); 630 amp_mgr_put(conn->amp_mgr);
588 631
589 hci_conn_hash_del(hdev, conn);
590 if (hdev->notify)
591 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
592
593 skb_queue_purge(&conn->data_q); 632 skb_queue_purge(&conn->data_q);
594 633
595 hci_conn_del_sysfs(conn); 634 /* Remove the connection from the list and clean up its remaining
596 635 * state. This is a separate function since for some cases like
597 debugfs_remove_recursive(conn->debugfs); 636 * BT_CONNECT_SCAN we *only* want the cleanup part without the
598 637 * rest of hci_conn_del.
599 if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags)) 638 */
600 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type); 639 hci_conn_cleanup(conn);
601
602 hci_dev_put(hdev);
603
604 hci_conn_put(conn);
605 640
606 return 0; 641 return 0;
607} 642}
@@ -973,15 +1008,23 @@ static int hci_explicit_conn_params_set(struct hci_request *req,
973 if (is_connected(hdev, addr, addr_type)) 1008 if (is_connected(hdev, addr, addr_type))
974 return -EISCONN; 1009 return -EISCONN;
975 1010
976 params = hci_conn_params_add(hdev, addr, addr_type); 1011 params = hci_conn_params_lookup(hdev, addr, addr_type);
977 if (!params) 1012 if (!params) {
978 return -EIO; 1013 params = hci_conn_params_add(hdev, addr, addr_type);
1014 if (!params)
1015 return -ENOMEM;
979 1016
980 /* If we created new params, or existing params were marked as disabled, 1017 /* If we created new params, mark them to be deleted in
981 * mark them to be used just once to connect. 1018 * hci_connect_le_scan_cleanup. It's a different case than
982 */ 1019 * existing disabled params; those will stay after cleanup.
983 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) { 1020 */
984 params->auto_connect = HCI_AUTO_CONN_EXPLICIT; 1021 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1022 }
1023
1024 /* We're trying to connect, so make sure params are at pend_le_conns */
1025 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1026 params->auto_connect == HCI_AUTO_CONN_REPORT ||
1027 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
985 list_del_init(&params->action); 1028 list_del_init(&params->action);
986 list_add(&params->action, &hdev->pend_le_conns); 1029 list_add(&params->action, &hdev->pend_le_conns);
987 } 1030 }
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index adcbc74c2432..e837539452fb 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2861,13 +2861,6 @@ struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
2861 return param; 2861 return param;
2862 } 2862 }
2863 2863
2864 list_for_each_entry(param, &hdev->pend_le_reports, action) {
2865 if (bacmp(&param->addr, addr) == 0 &&
2866 param->addr_type == addr_type &&
2867 param->explicit_connect)
2868 return param;
2869 }
2870
2871 return NULL; 2864 return NULL;
2872} 2865}
2873 2866
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 186041866315..bc31099d3b5b 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -55,7 +55,12 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
55 wake_up_bit(&hdev->flags, HCI_INQUIRY); 55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
56 56
57 hci_dev_lock(hdev); 57 hci_dev_lock(hdev);
58 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 58 /* Set discovery state to stopped if we're not doing LE active
59 * scanning.
60 */
61 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
62 hdev->le_scan_type != LE_SCAN_ACTIVE)
63 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
59 hci_dev_unlock(hdev); 64 hci_dev_unlock(hdev);
60 65
61 hci_conn_check_pending(hdev); 66 hci_conn_check_pending(hdev);
@@ -4648,8 +4653,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4648 /* If we're not connectable only connect devices that we have in 4653 /* If we're not connectable only connect devices that we have in
4649 * our pend_le_conns list. 4654 * our pend_le_conns list.
4650 */ 4655 */
4651 params = hci_explicit_connect_lookup(hdev, addr, addr_type); 4656 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
4652 4657 addr_type);
4653 if (!params) 4658 if (!params)
4654 return NULL; 4659 return NULL;
4655 4660
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index ccaf5a436d8f..c4fe2fee753f 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -3545,6 +3545,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3545 auth_type); 3545 auth_type);
3546 } else { 3546 } else {
3547 u8 addr_type; 3547 u8 addr_type;
3548 struct hci_conn_params *p;
3548 3549
3549 /* Convert from L2CAP channel address type to HCI address type 3550 /* Convert from L2CAP channel address type to HCI address type
3550 */ 3551 */
@@ -3562,7 +3563,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3562 * If connection parameters already exist, then they 3563 * If connection parameters already exist, then they
3563 * will be kept and this function does nothing. 3564 * will be kept and this function does nothing.
3564 */ 3565 */
3565 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type); 3566 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3567
3568 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3569 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3566 3570
3567 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, 3571 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
3568 addr_type, sec_level, 3572 addr_type, sec_level,
@@ -6117,14 +6121,21 @@ static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
6117 __hci_update_background_scan(req); 6121 __hci_update_background_scan(req);
6118 break; 6122 break;
6119 case HCI_AUTO_CONN_REPORT: 6123 case HCI_AUTO_CONN_REPORT:
6120 list_add(&params->action, &hdev->pend_le_reports); 6124 if (params->explicit_connect)
6125 list_add(&params->action, &hdev->pend_le_conns);
6126 else
6127 list_add(&params->action, &hdev->pend_le_reports);
6121 __hci_update_background_scan(req); 6128 __hci_update_background_scan(req);
6122 break; 6129 break;
6123 case HCI_AUTO_CONN_DIRECT: 6130 case HCI_AUTO_CONN_DIRECT:
6124 case HCI_AUTO_CONN_ALWAYS: 6131 case HCI_AUTO_CONN_ALWAYS:
6125 if (!is_connected(hdev, addr, addr_type)) { 6132 if (!is_connected(hdev, addr, addr_type)) {
6126 list_add(&params->action, &hdev->pend_le_conns); 6133 list_add(&params->action, &hdev->pend_le_conns);
6127 __hci_update_background_scan(req); 6134 /* If we are in scan phase of connecting, we were
6135 * already added to pend_le_conns and scanning.
6136 */
6137 if (params->auto_connect != HCI_AUTO_CONN_EXPLICIT)
6138 __hci_update_background_scan(req);
6128 } 6139 }
6129 break; 6140 break;
6130 } 6141 }
@@ -6379,7 +6390,8 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
6379 goto unlock; 6390 goto unlock;
6380 } 6391 }
6381 6392
6382 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) { 6393 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
6394 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
6383 err = cmd->cmd_complete(cmd, 6395 err = cmd->cmd_complete(cmd,
6384 MGMT_STATUS_INVALID_PARAMS); 6396 MGMT_STATUS_INVALID_PARAMS);
6385 mgmt_pending_remove(cmd); 6397 mgmt_pending_remove(cmd);
@@ -6415,6 +6427,10 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
6415 if (p->auto_connect == HCI_AUTO_CONN_DISABLED) 6427 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6416 continue; 6428 continue;
6417 device_removed(sk, hdev, &p->addr, p->addr_type); 6429 device_removed(sk, hdev, &p->addr, p->addr_type);
6430 if (p->explicit_connect) {
6431 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
6432 continue;
6433 }
6418 list_del(&p->action); 6434 list_del(&p->action);
6419 list_del(&p->list); 6435 list_del(&p->list);
6420 kfree(p); 6436 kfree(p);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index b495ab1797fa..29edf74846fc 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1284,7 +1284,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
1284 1284
1285 gstrings.len = ret; 1285 gstrings.len = ret;
1286 1286
1287 data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); 1287 data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
1288 if (!data) 1288 if (!data)
1289 return -ENOMEM; 1289 return -ENOMEM;
1290 1290
diff --git a/net/core/filter.c b/net/core/filter.c
index 05a04ea87172..bb18c3680001 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1415,6 +1415,7 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
1415 return dev_forward_skb(dev, skb2); 1415 return dev_forward_skb(dev, skb2);
1416 1416
1417 skb2->dev = dev; 1417 skb2->dev = dev;
1418 skb_sender_cpu_clear(skb2);
1418 return dev_queue_xmit(skb2); 1419 return dev_queue_xmit(skb2);
1419} 1420}
1420 1421
@@ -1854,9 +1855,13 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
1854 goto out; 1855 goto out;
1855 1856
1856 /* We're copying the filter that has been originally attached, 1857 /* We're copying the filter that has been originally attached,
1857 * so no conversion/decode needed anymore. 1858 * so no conversion/decode needed anymore. eBPF programs that
1859 * have no original program cannot be dumped through this.
1858 */ 1860 */
1861 ret = -EACCES;
1859 fprog = filter->prog->orig_prog; 1862 fprog = filter->prog->orig_prog;
1863 if (!fprog)
1864 goto out;
1860 1865
1861 ret = fprog->len; 1866 ret = fprog->len;
1862 if (!len) 1867 if (!len)
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index c59fa5d9c22c..adb5325f4934 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -22,6 +22,7 @@
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#include <linux/of_net.h> 23#include <linux/of_net.h>
24#include <linux/sysfs.h> 24#include <linux/sysfs.h>
25#include <linux/phy_fixed.h>
25#include "dsa_priv.h" 26#include "dsa_priv.h"
26 27
27char dsa_driver_version[] = "0.1"; 28char dsa_driver_version[] = "0.1";
@@ -305,7 +306,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
305 if (ret < 0) 306 if (ret < 0)
306 goto out; 307 goto out;
307 308
308 ds->slave_mii_bus = mdiobus_alloc(); 309 ds->slave_mii_bus = devm_mdiobus_alloc(parent);
309 if (ds->slave_mii_bus == NULL) { 310 if (ds->slave_mii_bus == NULL) {
310 ret = -ENOMEM; 311 ret = -ENOMEM;
311 goto out; 312 goto out;
@@ -314,7 +315,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
314 315
315 ret = mdiobus_register(ds->slave_mii_bus); 316 ret = mdiobus_register(ds->slave_mii_bus);
316 if (ret < 0) 317 if (ret < 0)
317 goto out_free; 318 goto out;
318 319
319 320
320 /* 321 /*
@@ -367,10 +368,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
367 368
368 return ret; 369 return ret;
369 370
370out_free:
371 mdiobus_free(ds->slave_mii_bus);
372out: 371out:
373 kfree(ds);
374 return ret; 372 return ret;
375} 373}
376 374
@@ -400,7 +398,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
400 /* 398 /*
401 * Allocate and initialise switch state. 399 * Allocate and initialise switch state.
402 */ 400 */
403 ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL); 401 ds = devm_kzalloc(parent, sizeof(*ds) + drv->priv_size, GFP_KERNEL);
404 if (ds == NULL) 402 if (ds == NULL)
405 return ERR_PTR(-ENOMEM); 403 return ERR_PTR(-ENOMEM);
406 404
@@ -420,10 +418,47 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
420 418
421static void dsa_switch_destroy(struct dsa_switch *ds) 419static void dsa_switch_destroy(struct dsa_switch *ds)
422{ 420{
421 struct device_node *port_dn;
422 struct phy_device *phydev;
423 struct dsa_chip_data *cd = ds->pd;
424 int port;
425
423#ifdef CONFIG_NET_DSA_HWMON 426#ifdef CONFIG_NET_DSA_HWMON
424 if (ds->hwmon_dev) 427 if (ds->hwmon_dev)
425 hwmon_device_unregister(ds->hwmon_dev); 428 hwmon_device_unregister(ds->hwmon_dev);
426#endif 429#endif
430
431 /* Disable configuration of the CPU and DSA ports */
432 for (port = 0; port < DSA_MAX_PORTS; port++) {
433 if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
434 continue;
435
436 port_dn = cd->port_dn[port];
437 if (of_phy_is_fixed_link(port_dn)) {
438 phydev = of_phy_find_device(port_dn);
439 if (phydev) {
440 int addr = phydev->addr;
441
442 phy_device_free(phydev);
443 of_node_put(port_dn);
444 fixed_phy_del(addr);
445 }
446 }
447 }
448
449 /* Destroy network devices for physical switch ports. */
450 for (port = 0; port < DSA_MAX_PORTS; port++) {
451 if (!(ds->phys_port_mask & (1 << port)))
452 continue;
453
454 if (!ds->ports[port])
455 continue;
456
457 unregister_netdev(ds->ports[port]);
458 free_netdev(ds->ports[port]);
459 }
460
461 mdiobus_unregister(ds->slave_mii_bus);
427} 462}
428 463
429#ifdef CONFIG_PM_SLEEP 464#ifdef CONFIG_PM_SLEEP
@@ -802,10 +837,11 @@ static inline void dsa_of_remove(struct device *dev)
802} 837}
803#endif 838#endif
804 839
805static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, 840static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
806 struct device *parent, struct dsa_platform_data *pd) 841 struct device *parent, struct dsa_platform_data *pd)
807{ 842{
808 int i; 843 int i;
844 unsigned configured = 0;
809 845
810 dst->pd = pd; 846 dst->pd = pd;
811 dst->master_netdev = dev; 847 dst->master_netdev = dev;
@@ -825,9 +861,17 @@ static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
825 dst->ds[i] = ds; 861 dst->ds[i] = ds;
826 if (ds->drv->poll_link != NULL) 862 if (ds->drv->poll_link != NULL)
827 dst->link_poll_needed = 1; 863 dst->link_poll_needed = 1;
864
865 ++configured;
828 } 866 }
829 867
830 /* 868 /*
869 * If no switch was found, exit cleanly
870 */
871 if (!configured)
872 return -EPROBE_DEFER;
873
874 /*
831 * If we use a tagging format that doesn't have an ethertype 875 * If we use a tagging format that doesn't have an ethertype
832 * field, make sure that all packets from this point on get 876 * field, make sure that all packets from this point on get
833 * sent to the tag format's receive function. 877 * sent to the tag format's receive function.
@@ -843,6 +887,8 @@ static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
843 dst->link_poll_timer.expires = round_jiffies(jiffies + HZ); 887 dst->link_poll_timer.expires = round_jiffies(jiffies + HZ);
844 add_timer(&dst->link_poll_timer); 888 add_timer(&dst->link_poll_timer);
845 } 889 }
890
891 return 0;
846} 892}
847 893
848static int dsa_probe(struct platform_device *pdev) 894static int dsa_probe(struct platform_device *pdev)
@@ -883,7 +929,7 @@ static int dsa_probe(struct platform_device *pdev)
883 goto out; 929 goto out;
884 } 930 }
885 931
886 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 932 dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
887 if (dst == NULL) { 933 if (dst == NULL) {
888 dev_put(dev); 934 dev_put(dev);
889 ret = -ENOMEM; 935 ret = -ENOMEM;
@@ -892,7 +938,9 @@ static int dsa_probe(struct platform_device *pdev)
892 938
893 platform_set_drvdata(pdev, dst); 939 platform_set_drvdata(pdev, dst);
894 940
895 dsa_setup_dst(dst, dev, &pdev->dev, pd); 941 ret = dsa_setup_dst(dst, dev, &pdev->dev, pd);
942 if (ret)
943 goto out;
896 944
897 return 0; 945 return 0;
898 946
@@ -914,7 +962,7 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
914 for (i = 0; i < dst->pd->nr_chips; i++) { 962 for (i = 0; i < dst->pd->nr_chips; i++) {
915 struct dsa_switch *ds = dst->ds[i]; 963 struct dsa_switch *ds = dst->ds[i];
916 964
917 if (ds != NULL) 965 if (ds)
918 dsa_switch_destroy(ds); 966 dsa_switch_destroy(ds);
919 } 967 }
920} 968}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index f03db8b7abee..0c9c3482e419 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -312,7 +312,7 @@ static void arp_send_dst(int type, int ptype, __be32 dest_ip,
312 if (!skb) 312 if (!skb)
313 return; 313 return;
314 314
315 skb_dst_set(skb, dst); 315 skb_dst_set(skb, dst_clone(dst));
316 arp_xmit(skb); 316 arp_xmit(skb);
317} 317}
318 318
@@ -384,7 +384,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
384 } 384 }
385 385
386 if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE)) 386 if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE))
387 dst = dst_clone(skb_dst(skb)); 387 dst = skb_dst(skb);
388 arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr, 388 arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
389 dst_hw, dev->dev_addr, NULL, dst); 389 dst_hw, dev->dev_addr, NULL, dst);
390} 390}
@@ -811,7 +811,7 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
811 } else { 811 } else {
812 pneigh_enqueue(&arp_tbl, 812 pneigh_enqueue(&arp_tbl,
813 in_dev->arp_parms, skb); 813 in_dev->arp_parms, skb);
814 return 0; 814 goto out_free_dst;
815 } 815 }
816 goto out; 816 goto out;
817 } 817 }
@@ -865,6 +865,8 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
865 865
866out: 866out:
867 consume_skb(skb); 867 consume_skb(skb);
868out_free_dst:
869 dst_release(reply_dst);
868 return 0; 870 return 0;
869} 871}
870 872
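
The arp.c hunks rebalance dst reference counting: arp_send_dst() now pins its own reference with dst_clone(), and arp_process() drops reply_dst on the new out_free_dst path as well. A toy userspace sketch of that get/put discipline, assuming a single-threaded counter (the kernel uses atomics); obj_get/obj_put and struct ref_obj are invented names:

    #include <stdlib.h>

    /* Toy refcounted object mirroring the dst_clone()/dst_release() pattern:
     * every holder takes its own reference and drops it on every exit path. */
    struct ref_obj {
        int refcnt;   /* plain int: single-threaded illustration only */
    };

    static struct ref_obj *obj_get(struct ref_obj *o)
    {
        if (o)
            o->refcnt++;
        return o;
    }

    static void obj_put(struct ref_obj *o)
    {
        if (o && --o->refcnt == 0)
            free(o);
    }

    static void consumer(struct ref_obj *o)
    {
        /* take a private reference, like skb_dst_set(skb, dst_clone(dst)) */
        struct ref_obj *mine = obj_get(o);

        /* ... use the object ... */
        obj_put(mine);
    }

    int main(void)
    {
        struct ref_obj *o = calloc(1, sizeof(*o));

        if (!o)
            return 1;
        o->refcnt = 1;
        consumer(o);
        obj_put(o);   /* the creator drops its own reference last */
        return 0;
    }
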
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7bb9c39e0a4d..61b45a17fc73 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -577,21 +577,22 @@ EXPORT_SYMBOL(inet_rtx_syn_ack);
577static bool reqsk_queue_unlink(struct request_sock_queue *queue, 577static bool reqsk_queue_unlink(struct request_sock_queue *queue,
578 struct request_sock *req) 578 struct request_sock *req)
579{ 579{
580 struct listen_sock *lopt = queue->listen_opt;
581 struct request_sock **prev; 580 struct request_sock **prev;
581 struct listen_sock *lopt;
582 bool found = false; 582 bool found = false;
583 583
584 spin_lock(&queue->syn_wait_lock); 584 spin_lock(&queue->syn_wait_lock);
585 585 lopt = queue->listen_opt;
586 for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL; 586 if (lopt) {
587 prev = &(*prev)->dl_next) { 587 for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
588 if (*prev == req) { 588 prev = &(*prev)->dl_next) {
589 *prev = req->dl_next; 589 if (*prev == req) {
590 found = true; 590 *prev = req->dl_next;
591 break; 591 found = true;
592 break;
593 }
592 } 594 }
593 } 595 }
594
595 spin_unlock(&queue->syn_wait_lock); 596 spin_unlock(&queue->syn_wait_lock);
596 if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer)) 597 if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
597 reqsk_put(req); 598 reqsk_put(req);
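
The inet_connection_sock.c change samples queue->listen_opt only after taking syn_wait_lock and tolerates it being NULL, instead of dereferencing a pointer read before the lock. A small pthread sketch of the same read-under-the-lock pattern, with made-up struct and function names; build with -pthread:

    #include <pthread.h>
    #include <stddef.h>

    struct table {
        int nentries;
    };

    struct queue {
        pthread_mutex_t lock;
        struct table *opt;   /* may be torn down concurrently */
    };

    /* Mirrors the reqsk_queue_unlink() fix: the pointer is sampled only while
     * holding the lock that serialises its teardown, and NULL is tolerated. */
    static int queue_count(struct queue *q)
    {
        struct table *t;
        int n = 0;

        pthread_mutex_lock(&q->lock);
        t = q->opt;   /* read under the lock, not before it */
        if (t)
            n = t->nentries;
        pthread_mutex_unlock(&q->lock);
        return n;
    }

    int main(void)
    {
        static struct table t = { .nentries = 3 };
        static struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .opt = &t };

        return queue_count(&q) == 3 ? 0 : 1;
    }
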
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 900113376d4e..36b85bd05ac8 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3119,6 +3119,8 @@ static void addrconf_gre_config(struct net_device *dev)
3119 } 3119 }
3120 3120
3121 addrconf_addr_gen(idev, true); 3121 addrconf_addr_gen(idev, true);
3122 if (dev->flags & IFF_POINTOPOINT)
3123 addrconf_add_mroute(dev);
3122} 3124}
3123#endif 3125#endif
3124 3126
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 92b1aa38f121..61d403ee1031 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -376,6 +376,9 @@ int ip6_forward(struct sk_buff *skb)
376 if (skb->pkt_type != PACKET_HOST) 376 if (skb->pkt_type != PACKET_HOST)
377 goto drop; 377 goto drop;
378 378
379 if (unlikely(skb->sk))
380 goto drop;
381
379 if (skb_warn_if_lro(skb)) 382 if (skb_warn_if_lro(skb))
380 goto drop; 383 goto drop;
381 384
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index cb32ce250db0..968f31c01f89 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -142,6 +142,9 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
142 struct net_device *loopback_dev = net->loopback_dev; 142 struct net_device *loopback_dev = net->loopback_dev;
143 int cpu; 143 int cpu;
144 144
145 if (dev == loopback_dev)
146 return;
147
145 for_each_possible_cpu(cpu) { 148 for_each_possible_cpu(cpu) {
146 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu); 149 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
147 struct rt6_info *rt; 150 struct rt6_info *rt;
@@ -151,14 +154,12 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
151 struct inet6_dev *rt_idev = rt->rt6i_idev; 154 struct inet6_dev *rt_idev = rt->rt6i_idev;
152 struct net_device *rt_dev = rt->dst.dev; 155 struct net_device *rt_dev = rt->dst.dev;
153 156
154 if (rt_idev && (rt_idev->dev == dev || !dev) && 157 if (rt_idev->dev == dev) {
155 rt_idev->dev != loopback_dev) {
156 rt->rt6i_idev = in6_dev_get(loopback_dev); 158 rt->rt6i_idev = in6_dev_get(loopback_dev);
157 in6_dev_put(rt_idev); 159 in6_dev_put(rt_idev);
158 } 160 }
159 161
160 if (rt_dev && (rt_dev == dev || !dev) && 162 if (rt_dev == dev) {
161 rt_dev != loopback_dev) {
162 rt->dst.dev = loopback_dev; 163 rt->dst.dev = loopback_dev;
163 dev_hold(rt->dst.dev); 164 dev_hold(rt->dst.dev);
164 dev_put(rt_dev); 165 dev_put(rt_dev);
@@ -247,12 +248,6 @@ static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
247{ 248{
248} 249}
249 250
250static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
251 unsigned long old)
252{
253 return NULL;
254}
255
256static struct dst_ops ip6_dst_blackhole_ops = { 251static struct dst_ops ip6_dst_blackhole_ops = {
257 .family = AF_INET6, 252 .family = AF_INET6,
258 .destroy = ip6_dst_destroy, 253 .destroy = ip6_dst_destroy,
@@ -261,7 +256,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
261 .default_advmss = ip6_default_advmss, 256 .default_advmss = ip6_default_advmss,
262 .update_pmtu = ip6_rt_blackhole_update_pmtu, 257 .update_pmtu = ip6_rt_blackhole_update_pmtu,
263 .redirect = ip6_rt_blackhole_redirect, 258 .redirect = ip6_rt_blackhole_redirect,
264 .cow_metrics = ip6_rt_blackhole_cow_metrics, 259 .cow_metrics = dst_cow_metrics_generic,
265 .neigh_lookup = ip6_neigh_lookup, 260 .neigh_lookup = ip6_neigh_lookup,
266}; 261};
267 262
@@ -318,6 +313,15 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
318 313
319#endif 314#endif
320 315
316static void rt6_info_init(struct rt6_info *rt)
317{
318 struct dst_entry *dst = &rt->dst;
319
320 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
321 INIT_LIST_HEAD(&rt->rt6i_siblings);
322 INIT_LIST_HEAD(&rt->rt6i_uncached);
323}
324
321/* allocate dst with ip6_dst_ops */ 325/* allocate dst with ip6_dst_ops */
322static struct rt6_info *__ip6_dst_alloc(struct net *net, 326static struct rt6_info *__ip6_dst_alloc(struct net *net,
323 struct net_device *dev, 327 struct net_device *dev,
@@ -326,13 +330,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
326 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, 330 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
327 0, DST_OBSOLETE_FORCE_CHK, flags); 331 0, DST_OBSOLETE_FORCE_CHK, flags);
328 332
329 if (rt) { 333 if (rt)
330 struct dst_entry *dst = &rt->dst; 334 rt6_info_init(rt);
331 335
332 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
333 INIT_LIST_HEAD(&rt->rt6i_siblings);
334 INIT_LIST_HEAD(&rt->rt6i_uncached);
335 }
336 return rt; 336 return rt;
337} 337}
338 338
@@ -1213,24 +1213,20 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
1213 1213
1214 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0); 1214 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
1215 if (rt) { 1215 if (rt) {
1216 new = &rt->dst; 1216 rt6_info_init(rt);
1217
1218 memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
1219 1217
1218 new = &rt->dst;
1220 new->__use = 1; 1219 new->__use = 1;
1221 new->input = dst_discard; 1220 new->input = dst_discard;
1222 new->output = dst_discard_sk; 1221 new->output = dst_discard_sk;
1223 1222
1224 if (dst_metrics_read_only(&ort->dst)) 1223 dst_copy_metrics(new, &ort->dst);
1225 new->_metrics = ort->dst._metrics;
1226 else
1227 dst_copy_metrics(new, &ort->dst);
1228 rt->rt6i_idev = ort->rt6i_idev; 1224 rt->rt6i_idev = ort->rt6i_idev;
1229 if (rt->rt6i_idev) 1225 if (rt->rt6i_idev)
1230 in6_dev_hold(rt->rt6i_idev); 1226 in6_dev_hold(rt->rt6i_idev);
1231 1227
1232 rt->rt6i_gateway = ort->rt6i_gateway; 1228 rt->rt6i_gateway = ort->rt6i_gateway;
1233 rt->rt6i_flags = ort->rt6i_flags; 1229 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
1234 rt->rt6i_metric = 0; 1230 rt->rt6i_metric = 0;
1235 1231
1236 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); 1232 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
@@ -2622,7 +2618,8 @@ void rt6_ifdown(struct net *net, struct net_device *dev)
2622 2618
2623 fib6_clean_all(net, fib6_ifdown, &adn); 2619 fib6_clean_all(net, fib6_ifdown, &adn);
2624 icmp6_clean_all(fib6_ifdown, &adn); 2620 icmp6_clean_all(fib6_ifdown, &adn);
2625 rt6_uncached_list_flush_dev(net, dev); 2621 if (dev)
2622 rt6_uncached_list_flush_dev(net, dev);
2626} 2623}
2627 2624
2628struct rt6_mtu_change_arg { 2625struct rt6_mtu_change_arg {
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 30caa289c5db..5cedfda4b241 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -37,6 +37,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
37 37
38 memset(&fl6, 0, sizeof(fl6)); 38 memset(&fl6, 0, sizeof(fl6));
39 fl6.flowi6_oif = oif; 39 fl6.flowi6_oif = oif;
40 fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
40 memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr)); 41 memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
41 if (saddr) 42 if (saddr)
42 memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr)); 43 memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index ced6bf3be8d6..1560c8482bcb 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -149,7 +149,7 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
149 149
150 for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) { 150 for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) {
151 if (test_bit(i, local->hw.flags)) 151 if (test_bit(i, local->hw.flags))
152 pos += scnprintf(pos, end - pos, "%s", 152 pos += scnprintf(pos, end - pos, "%s\n",
153 hw_flag_names[i]); 153 hw_flag_names[i]);
154 } 154 }
155 155
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 8ba583243509..3ed7ddfbf8e8 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -101,6 +101,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
101 * when it wakes up for the next time. 101 * when it wakes up for the next time.
102 */ 102 */
103 set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT); 103 set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);
104 ieee80211_clear_fast_xmit(sta);
104 105
105 /* 106 /*
106 * This code races in the following way: 107 * This code races in the following way:
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 84e0e8c7fb23..7892eb8ed4c8 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1218,8 +1218,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1218 1218
1219 if (!tx->sta) 1219 if (!tx->sta)
1220 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1220 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1221 else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) 1221 else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) {
1222 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1222 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1223 ieee80211_check_fast_xmit(tx->sta);
1224 }
1223 1225
1224 info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT; 1226 info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
1225 1227
@@ -2451,7 +2453,8 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
2451 2453
2452 if (test_sta_flag(sta, WLAN_STA_PS_STA) || 2454 if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
2453 test_sta_flag(sta, WLAN_STA_PS_DRIVER) || 2455 test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
2454 test_sta_flag(sta, WLAN_STA_PS_DELIVER)) 2456 test_sta_flag(sta, WLAN_STA_PS_DELIVER) ||
2457 test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT))
2455 goto out; 2458 goto out;
2456 2459
2457 if (sdata->noack_map) 2460 if (sdata->noack_map)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 8f060d7f9a0e..0a49a8c7c564 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2785,6 +2785,7 @@ static int netlink_dump(struct sock *sk)
2785 struct sk_buff *skb = NULL; 2785 struct sk_buff *skb = NULL;
2786 struct nlmsghdr *nlh; 2786 struct nlmsghdr *nlh;
2787 int len, err = -ENOBUFS; 2787 int len, err = -ENOBUFS;
2788 int alloc_min_size;
2788 int alloc_size; 2789 int alloc_size;
2789 2790
2790 mutex_lock(nlk->cb_mutex); 2791 mutex_lock(nlk->cb_mutex);
@@ -2793,9 +2794,6 @@ static int netlink_dump(struct sock *sk)
2793 goto errout_skb; 2794 goto errout_skb;
2794 } 2795 }
2795 2796
2796 cb = &nlk->cb;
2797 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2798
2799 if (!netlink_rx_is_mmaped(sk) && 2797 if (!netlink_rx_is_mmaped(sk) &&
2800 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 2798 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2801 goto errout_skb; 2799 goto errout_skb;
@@ -2805,23 +2803,35 @@ static int netlink_dump(struct sock *sk)
2805 * to reduce number of system calls on dump operations, if user 2803 * to reduce number of system calls on dump operations, if user
2806 * ever provided a big enough buffer. 2804 * ever provided a big enough buffer.
2807 */ 2805 */
2808 if (alloc_size < nlk->max_recvmsg_len) { 2806 cb = &nlk->cb;
2809 skb = netlink_alloc_skb(sk, 2807 alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2810 nlk->max_recvmsg_len, 2808
2811 nlk->portid, 2809 if (alloc_min_size < nlk->max_recvmsg_len) {
2810 alloc_size = nlk->max_recvmsg_len;
2811 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2812 GFP_KERNEL | 2812 GFP_KERNEL |
2813 __GFP_NOWARN | 2813 __GFP_NOWARN |
2814 __GFP_NORETRY); 2814 __GFP_NORETRY);
2815 /* available room should be exact amount to avoid MSG_TRUNC */
2816 if (skb)
2817 skb_reserve(skb, skb_tailroom(skb) -
2818 nlk->max_recvmsg_len);
2819 } 2815 }
2820 if (!skb) 2816 if (!skb) {
2817 alloc_size = alloc_min_size;
2821 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, 2818 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2822 GFP_KERNEL); 2819 GFP_KERNEL);
2820 }
2823 if (!skb) 2821 if (!skb)
2824 goto errout_skb; 2822 goto errout_skb;
2823
2824 /* Trim skb to allocated size. User is expected to provide buffer as
2825 * large as max(min_dump_alloc, 16KiB (max_recvmsg_len capped at
2826 * netlink_recvmsg())). dump will pack as many smaller messages as
2827 * could fit within the allocated skb. skb is typically allocated
2828 * with larger space than required (could be as much as near 2x the
2829 * requested size with align to next power of 2 approach). Allowing
2830 * dump to use the excess space makes it difficult for a user to have a
2831 * reasonable static buffer based on the expected largest dump of a
2832 * single netdev. The outcome is MSG_TRUNC error.
2833 */
2834 skb_reserve(skb, skb_tailroom(skb) - alloc_size);
2825 netlink_skb_set_owner_r(skb, sk); 2835 netlink_skb_set_owner_r(skb, sk);
2826 2836
2827 len = cb->dump(skb, cb); 2837 len = cb->dump(skb, cb);
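
The netlink_dump() rework allocates opportunistically at nlk->max_recvmsg_len but then trims the usable area back with skb_reserve(), so the dump never packs more data than the reader's buffer can take and MSG_TRUNC is avoided. A rough userspace sketch of that capacity-versus-limit idea, assuming a power-of-two allocator; dump_buf and its helpers are illustrative only:

    #include <stdlib.h>
    #include <string.h>

    /* Toy buffer mirroring the netlink_dump() change: the backing allocation
     * may be rounded up, but writers may only fill the size the reader asked
     * for, so nothing is silently truncated on copy-out. */
    struct dump_buf {
        unsigned char *data;
        size_t capacity;   /* what the allocator really gave us */
        size_t limit;      /* what the reader's buffer can take */
        size_t used;
    };

    static int dump_buf_init(struct dump_buf *b, size_t want)
    {
        size_t cap = 1;

        while (cap < want)   /* allocator rounds up to a power of two */
            cap <<= 1;
        b->data = malloc(cap);
        if (!b->data)
            return -1;
        b->capacity = cap;
        b->limit = want;     /* trim the usable window back down */
        b->used = 0;
        return 0;
    }

    static int dump_buf_put(struct dump_buf *b, const void *msg, size_t len)
    {
        if (b->used + len > b->limit)   /* stop at the advertised size */
            return -1;
        memcpy(b->data + b->used, msg, len);
        b->used += len;
        return 0;
    }

    int main(void)
    {
        struct dump_buf b;

        if (dump_buf_init(&b, 100))
            return 1;
        while (dump_buf_put(&b, "0123456789", 10) == 0)
            ;   /* packs ten 10-byte messages, then refuses the eleventh */
        free(b.data);
        return b.used == 100 ? 0 : 1;
    }
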
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 315f5330b6e5..c6a39bf2c3b9 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -684,7 +684,7 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
684{ 684{
685 if (skb_network_offset(skb) > MAX_L2_LEN) { 685 if (skb_network_offset(skb) > MAX_L2_LEN) {
686 OVS_NLERR(1, "L2 header too long to fragment"); 686 OVS_NLERR(1, "L2 header too long to fragment");
687 return; 687 goto err;
688 } 688 }
689 689
690 if (ethertype == htons(ETH_P_IP)) { 690 if (ethertype == htons(ETH_P_IP)) {
@@ -708,8 +708,7 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
708 struct rt6_info ovs_rt; 708 struct rt6_info ovs_rt;
709 709
710 if (!v6ops) { 710 if (!v6ops) {
711 kfree_skb(skb); 711 goto err;
712 return;
713 } 712 }
714 713
715 prepare_frag(vport, skb); 714 prepare_frag(vport, skb);
@@ -728,8 +727,12 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
728 WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.", 727 WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
729 ovs_vport_name(vport), ntohs(ethertype), mru, 728 ovs_vport_name(vport), ntohs(ethertype), mru,
730 vport->dev->mtu); 729 vport->dev->mtu);
731 kfree_skb(skb); 730 goto err;
732 } 731 }
732
733 return;
734err:
735 kfree_skb(skb);
733} 736}
734 737
735static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port, 738static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
@@ -968,7 +971,7 @@ static int execute_masked_set_action(struct sk_buff *skb,
968 case OVS_KEY_ATTR_CT_STATE: 971 case OVS_KEY_ATTR_CT_STATE:
969 case OVS_KEY_ATTR_CT_ZONE: 972 case OVS_KEY_ATTR_CT_ZONE:
970 case OVS_KEY_ATTR_CT_MARK: 973 case OVS_KEY_ATTR_CT_MARK:
971 case OVS_KEY_ATTR_CT_LABEL: 974 case OVS_KEY_ATTR_CT_LABELS:
972 err = -EINVAL; 975 err = -EINVAL;
973 break; 976 break;
974 } 977 }
@@ -1099,6 +1102,12 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
1099 break; 1102 break;
1100 1103
1101 case OVS_ACTION_ATTR_CT: 1104 case OVS_ACTION_ATTR_CT:
1105 if (!is_flow_key_valid(key)) {
1106 err = ovs_flow_key_update(skb, key);
1107 if (err)
1108 return err;
1109 }
1110
1102 err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key, 1111 err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
1103 nla_data(a)); 1112 nla_data(a));
1104 1113
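
The ovs_fragment() hunk folds its failure exits into a single goto err that frees the skb exactly once. A compact plain-C sketch of that single-exit cleanup style, with a made-up process() function standing in for the real work:

    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch of the single-exit cleanup style the ovs_fragment() hunk adopts:
     * every failure jumps to one label that releases the buffer exactly once. */
    static int process(size_t len)
    {
        unsigned char *buf = malloc(len);

        if (!buf)
            return -1;   /* nothing allocated yet, nothing to free */
        if (len < 14) {
            fprintf(stderr, "header too short to process\n");
            goto err;
        }
        /* ... real work on buf would go here ... */
        free(buf);
        return 0;
    err:
        free(buf);       /* one release shared by all failure paths */
        return -1;
    }

    int main(void)
    {
        return process(64) == 0 && process(4) != 0 ? 0 : 1;
    }
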
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 002a755fa07e..80bf702715bb 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -37,9 +37,9 @@ struct md_mark {
37}; 37};
38 38
39/* Metadata label for masked write to conntrack label. */ 39/* Metadata label for masked write to conntrack label. */
40struct md_label { 40struct md_labels {
41 struct ovs_key_ct_label value; 41 struct ovs_key_ct_labels value;
42 struct ovs_key_ct_label mask; 42 struct ovs_key_ct_labels mask;
43}; 43};
44 44
45/* Conntrack action context for execution. */ 45/* Conntrack action context for execution. */
@@ -47,10 +47,10 @@ struct ovs_conntrack_info {
47 struct nf_conntrack_helper *helper; 47 struct nf_conntrack_helper *helper;
48 struct nf_conntrack_zone zone; 48 struct nf_conntrack_zone zone;
49 struct nf_conn *ct; 49 struct nf_conn *ct;
50 u32 flags; 50 u8 commit : 1;
51 u16 family; 51 u16 family;
52 struct md_mark mark; 52 struct md_mark mark;
53 struct md_label label; 53 struct md_labels labels;
54}; 54};
55 55
56static u16 key_to_nfproto(const struct sw_flow_key *key) 56static u16 key_to_nfproto(const struct sw_flow_key *key)
@@ -109,21 +109,21 @@ static u32 ovs_ct_get_mark(const struct nf_conn *ct)
109#endif 109#endif
110} 110}
111 111
112static void ovs_ct_get_label(const struct nf_conn *ct, 112static void ovs_ct_get_labels(const struct nf_conn *ct,
113 struct ovs_key_ct_label *label) 113 struct ovs_key_ct_labels *labels)
114{ 114{
115 struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL; 115 struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
116 116
117 if (cl) { 117 if (cl) {
118 size_t len = cl->words * sizeof(long); 118 size_t len = cl->words * sizeof(long);
119 119
120 if (len > OVS_CT_LABEL_LEN) 120 if (len > OVS_CT_LABELS_LEN)
121 len = OVS_CT_LABEL_LEN; 121 len = OVS_CT_LABELS_LEN;
122 else if (len < OVS_CT_LABEL_LEN) 122 else if (len < OVS_CT_LABELS_LEN)
123 memset(label, 0, OVS_CT_LABEL_LEN); 123 memset(labels, 0, OVS_CT_LABELS_LEN);
124 memcpy(label, cl->bits, len); 124 memcpy(labels, cl->bits, len);
125 } else { 125 } else {
126 memset(label, 0, OVS_CT_LABEL_LEN); 126 memset(labels, 0, OVS_CT_LABELS_LEN);
127 } 127 }
128} 128}
129 129
@@ -134,7 +134,7 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
134 key->ct.state = state; 134 key->ct.state = state;
135 key->ct.zone = zone->id; 135 key->ct.zone = zone->id;
136 key->ct.mark = ovs_ct_get_mark(ct); 136 key->ct.mark = ovs_ct_get_mark(ct);
137 ovs_ct_get_label(ct, &key->ct.label); 137 ovs_ct_get_labels(ct, &key->ct.labels);
138} 138}
139 139
140/* Update 'key' based on skb->nfct. If 'post_ct' is true, then OVS has 140/* Update 'key' based on skb->nfct. If 'post_ct' is true, then OVS has
@@ -167,7 +167,7 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
167 167
168int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb) 168int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
169{ 169{
170 if (nla_put_u8(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state)) 170 if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state))
171 return -EMSGSIZE; 171 return -EMSGSIZE;
172 172
173 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && 173 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
@@ -179,8 +179,8 @@ int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
179 return -EMSGSIZE; 179 return -EMSGSIZE;
180 180
181 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && 181 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
182 nla_put(skb, OVS_KEY_ATTR_CT_LABEL, sizeof(key->ct.label), 182 nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(key->ct.labels),
183 &key->ct.label)) 183 &key->ct.labels))
184 return -EMSGSIZE; 184 return -EMSGSIZE;
185 185
186 return 0; 186 return 0;
@@ -213,9 +213,9 @@ static int ovs_ct_set_mark(struct sk_buff *skb, struct sw_flow_key *key,
213#endif 213#endif
214} 214}
215 215
216static int ovs_ct_set_label(struct sk_buff *skb, struct sw_flow_key *key, 216static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key,
217 const struct ovs_key_ct_label *label, 217 const struct ovs_key_ct_labels *labels,
218 const struct ovs_key_ct_label *mask) 218 const struct ovs_key_ct_labels *mask)
219{ 219{
220 enum ip_conntrack_info ctinfo; 220 enum ip_conntrack_info ctinfo;
221 struct nf_conn_labels *cl; 221 struct nf_conn_labels *cl;
@@ -235,15 +235,15 @@ static int ovs_ct_set_label(struct sk_buff *skb, struct sw_flow_key *key,
235 nf_ct_labels_ext_add(ct); 235 nf_ct_labels_ext_add(ct);
236 cl = nf_ct_labels_find(ct); 236 cl = nf_ct_labels_find(ct);
237 } 237 }
238 if (!cl || cl->words * sizeof(long) < OVS_CT_LABEL_LEN) 238 if (!cl || cl->words * sizeof(long) < OVS_CT_LABELS_LEN)
239 return -ENOSPC; 239 return -ENOSPC;
240 240
241 err = nf_connlabels_replace(ct, (u32 *)label, (u32 *)mask, 241 err = nf_connlabels_replace(ct, (u32 *)labels, (u32 *)mask,
242 OVS_CT_LABEL_LEN / sizeof(u32)); 242 OVS_CT_LABELS_LEN / sizeof(u32));
243 if (err) 243 if (err)
244 return err; 244 return err;
245 245
246 ovs_ct_get_label(ct, &key->ct.label); 246 ovs_ct_get_labels(ct, &key->ct.labels);
247 return 0; 247 return 0;
248} 248}
249 249
@@ -465,12 +465,12 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
465 return 0; 465 return 0;
466} 466}
467 467
468static bool label_nonzero(const struct ovs_key_ct_label *label) 468static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
469{ 469{
470 size_t i; 470 size_t i;
471 471
472 for (i = 0; i < sizeof(*label); i++) 472 for (i = 0; i < sizeof(*labels); i++)
473 if (label->ct_label[i]) 473 if (labels->ct_labels[i])
474 return true; 474 return true;
475 475
476 return false; 476 return false;
@@ -493,7 +493,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
493 return err; 493 return err;
494 } 494 }
495 495
496 if (info->flags & OVS_CT_F_COMMIT) 496 if (info->commit)
497 err = ovs_ct_commit(net, key, info, skb); 497 err = ovs_ct_commit(net, key, info, skb);
498 else 498 else
499 err = ovs_ct_lookup(net, key, info, skb); 499 err = ovs_ct_lookup(net, key, info, skb);
@@ -506,9 +506,9 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
506 if (err) 506 if (err)
507 goto err; 507 goto err;
508 } 508 }
509 if (label_nonzero(&info->label.mask)) 509 if (labels_nonzero(&info->labels.mask))
510 err = ovs_ct_set_label(skb, key, &info->label.value, 510 err = ovs_ct_set_labels(skb, key, &info->labels.value,
511 &info->label.mask); 511 &info->labels.mask);
512err: 512err:
513 skb_push(skb, nh_ofs); 513 skb_push(skb, nh_ofs);
514 return err; 514 return err;
@@ -539,14 +539,13 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
539} 539}
540 540
541static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = { 541static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
542 [OVS_CT_ATTR_FLAGS] = { .minlen = sizeof(u32), 542 [OVS_CT_ATTR_COMMIT] = { .minlen = 0, .maxlen = 0 },
543 .maxlen = sizeof(u32) },
544 [OVS_CT_ATTR_ZONE] = { .minlen = sizeof(u16), 543 [OVS_CT_ATTR_ZONE] = { .minlen = sizeof(u16),
545 .maxlen = sizeof(u16) }, 544 .maxlen = sizeof(u16) },
546 [OVS_CT_ATTR_MARK] = { .minlen = sizeof(struct md_mark), 545 [OVS_CT_ATTR_MARK] = { .minlen = sizeof(struct md_mark),
547 .maxlen = sizeof(struct md_mark) }, 546 .maxlen = sizeof(struct md_mark) },
548 [OVS_CT_ATTR_LABEL] = { .minlen = sizeof(struct md_label), 547 [OVS_CT_ATTR_LABELS] = { .minlen = sizeof(struct md_labels),
549 .maxlen = sizeof(struct md_label) }, 548 .maxlen = sizeof(struct md_labels) },
550 [OVS_CT_ATTR_HELPER] = { .minlen = 1, 549 [OVS_CT_ATTR_HELPER] = { .minlen = 1,
551 .maxlen = NF_CT_HELPER_NAME_LEN } 550 .maxlen = NF_CT_HELPER_NAME_LEN }
552}; 551};
@@ -576,8 +575,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
576 } 575 }
577 576
578 switch (type) { 577 switch (type) {
579 case OVS_CT_ATTR_FLAGS: 578 case OVS_CT_ATTR_COMMIT:
580 info->flags = nla_get_u32(a); 579 info->commit = true;
581 break; 580 break;
582#ifdef CONFIG_NF_CONNTRACK_ZONES 581#ifdef CONFIG_NF_CONNTRACK_ZONES
583 case OVS_CT_ATTR_ZONE: 582 case OVS_CT_ATTR_ZONE:
@@ -593,10 +592,10 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
593 } 592 }
594#endif 593#endif
595#ifdef CONFIG_NF_CONNTRACK_LABELS 594#ifdef CONFIG_NF_CONNTRACK_LABELS
596 case OVS_CT_ATTR_LABEL: { 595 case OVS_CT_ATTR_LABELS: {
597 struct md_label *label = nla_data(a); 596 struct md_labels *labels = nla_data(a);
598 597
599 info->label = *label; 598 info->labels = *labels;
600 break; 599 break;
601 } 600 }
602#endif 601#endif
@@ -633,7 +632,7 @@ bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
633 attr == OVS_KEY_ATTR_CT_MARK) 632 attr == OVS_KEY_ATTR_CT_MARK)
634 return true; 633 return true;
635 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && 634 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
636 attr == OVS_KEY_ATTR_CT_LABEL) { 635 attr == OVS_KEY_ATTR_CT_LABELS) {
637 struct ovs_net *ovs_net = net_generic(net, ovs_net_id); 636 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
638 637
639 return ovs_net->xt_label; 638 return ovs_net->xt_label;
@@ -701,7 +700,7 @@ int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
701 if (!start) 700 if (!start)
702 return -EMSGSIZE; 701 return -EMSGSIZE;
703 702
704 if (nla_put_u32(skb, OVS_CT_ATTR_FLAGS, ct_info->flags)) 703 if (ct_info->commit && nla_put_flag(skb, OVS_CT_ATTR_COMMIT))
705 return -EMSGSIZE; 704 return -EMSGSIZE;
706 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && 705 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
707 nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id)) 706 nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
@@ -711,8 +710,8 @@ int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
711 &ct_info->mark)) 710 &ct_info->mark))
712 return -EMSGSIZE; 711 return -EMSGSIZE;
713 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && 712 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
714 nla_put(skb, OVS_CT_ATTR_LABEL, sizeof(ct_info->label), 713 nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
715 &ct_info->label)) 714 &ct_info->labels))
716 return -EMSGSIZE; 715 return -EMSGSIZE;
717 if (ct_info->helper) { 716 if (ct_info->helper) {
718 if (nla_put_string(skb, OVS_CT_ATTR_HELPER, 717 if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
@@ -737,7 +736,7 @@ void ovs_ct_free_action(const struct nlattr *a)
737 736
738void ovs_ct_init(struct net *net) 737void ovs_ct_init(struct net *net)
739{ 738{
740 unsigned int n_bits = sizeof(struct ovs_key_ct_label) * BITS_PER_BYTE; 739 unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
741 struct ovs_net *ovs_net = net_generic(net, ovs_net_id); 740 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
742 741
743 if (nf_connlabels_get(net, n_bits)) { 742 if (nf_connlabels_get(net, n_bits)) {
diff --git a/net/openvswitch/conntrack.h b/net/openvswitch/conntrack.h
index 43f5dd7a5577..da8714942c95 100644
--- a/net/openvswitch/conntrack.h
+++ b/net/openvswitch/conntrack.h
@@ -34,6 +34,13 @@ int ovs_ct_execute(struct net *, struct sk_buff *, struct sw_flow_key *,
34void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key); 34void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key);
35int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb); 35int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb);
36void ovs_ct_free_action(const struct nlattr *a); 36void ovs_ct_free_action(const struct nlattr *a);
37
38static inline bool ovs_ct_state_supported(u32 state)
39{
40 return !(state & ~(OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED |
41 OVS_CS_F_RELATED | OVS_CS_F_REPLY_DIR |
42 OVS_CS_F_INVALID | OVS_CS_F_TRACKED));
43}
37#else 44#else
38#include <linux/errno.h> 45#include <linux/errno.h>
39 46
@@ -46,6 +53,11 @@ static inline bool ovs_ct_verify(struct net *net, int attr)
46 return false; 53 return false;
47} 54}
48 55
56static inline bool ovs_ct_state_supported(u32 state)
57{
58 return false;
59}
60
49static inline int ovs_ct_copy_action(struct net *net, const struct nlattr *nla, 61static inline int ovs_ct_copy_action(struct net *net, const struct nlattr *nla,
50 const struct sw_flow_key *key, 62 const struct sw_flow_key *key,
51 struct sw_flow_actions **acts, bool log) 63 struct sw_flow_actions **acts, bool log)
@@ -72,7 +84,7 @@ static inline void ovs_ct_fill_key(const struct sk_buff *skb,
72 key->ct.state = 0; 84 key->ct.state = 0;
73 key->ct.zone = 0; 85 key->ct.zone = 0;
74 key->ct.mark = 0; 86 key->ct.mark = 0;
75 memset(&key->ct.label, 0, sizeof(key->ct.label)); 87 memset(&key->ct.labels, 0, sizeof(key->ct.labels));
76} 88}
77 89
78static inline int ovs_ct_put_key(const struct sw_flow_key *key, 90static inline int ovs_ct_put_key(const struct sw_flow_key *key,
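
The new ovs_ct_state_supported() helper accepts a ct_state value only when it sets no bit outside the known OVS_CS_F_* flags, via the !(state & ~mask) test. A tiny standalone sketch of the same check; the F_* values below are placeholders, not the real OVS bit assignments:

    #include <stdint.h>
    #include <stdio.h>

    #define F_NEW          (1u << 0)
    #define F_ESTABLISHED  (1u << 1)
    #define F_RELATED      (1u << 2)
    #define F_INVALID      (1u << 3)

    #define SUPPORTED_FLAGS (F_NEW | F_ESTABLISHED | F_RELATED | F_INVALID)

    /* Same shape as ovs_ct_state_supported(): a value is acceptable only if
     * it sets no bit outside the advertised mask. */
    static int flags_supported(uint32_t flags)
    {
        return !(flags & ~SUPPORTED_FLAGS);
    }

    int main(void)
    {
        printf("%d %d\n", flags_supported(F_NEW | F_RELATED),   /* 1 */
               flags_supported(1u << 8));                       /* 0 */
        return 0;
    }
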
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index fe527d2dd4b7..8cfa15a08668 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -116,7 +116,7 @@ struct sw_flow_key {
116 u16 zone; 116 u16 zone;
117 u32 mark; 117 u32 mark;
118 u8 state; 118 u8 state;
119 struct ovs_key_ct_label label; 119 struct ovs_key_ct_labels labels;
120 } ct; 120 } ct;
121 121
122} __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */ 122} __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 5c030a4d7338..171a691f1c32 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -291,10 +291,10 @@ size_t ovs_key_attr_size(void)
291 + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */ 291 + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */
292 + nla_total_size(4) /* OVS_KEY_ATTR_DP_HASH */ 292 + nla_total_size(4) /* OVS_KEY_ATTR_DP_HASH */
293 + nla_total_size(4) /* OVS_KEY_ATTR_RECIRC_ID */ 293 + nla_total_size(4) /* OVS_KEY_ATTR_RECIRC_ID */
294 + nla_total_size(1) /* OVS_KEY_ATTR_CT_STATE */ 294 + nla_total_size(4) /* OVS_KEY_ATTR_CT_STATE */
295 + nla_total_size(2) /* OVS_KEY_ATTR_CT_ZONE */ 295 + nla_total_size(2) /* OVS_KEY_ATTR_CT_ZONE */
296 + nla_total_size(4) /* OVS_KEY_ATTR_CT_MARK */ 296 + nla_total_size(4) /* OVS_KEY_ATTR_CT_MARK */
297 + nla_total_size(16) /* OVS_KEY_ATTR_CT_LABEL */ 297 + nla_total_size(16) /* OVS_KEY_ATTR_CT_LABELS */
298 + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */ 298 + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */
299 + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ 299 + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
300 + nla_total_size(4) /* OVS_KEY_ATTR_VLAN */ 300 + nla_total_size(4) /* OVS_KEY_ATTR_VLAN */
@@ -349,10 +349,10 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
349 [OVS_KEY_ATTR_TUNNEL] = { .len = OVS_ATTR_NESTED, 349 [OVS_KEY_ATTR_TUNNEL] = { .len = OVS_ATTR_NESTED,
350 .next = ovs_tunnel_key_lens, }, 350 .next = ovs_tunnel_key_lens, },
351 [OVS_KEY_ATTR_MPLS] = { .len = sizeof(struct ovs_key_mpls) }, 351 [OVS_KEY_ATTR_MPLS] = { .len = sizeof(struct ovs_key_mpls) },
352 [OVS_KEY_ATTR_CT_STATE] = { .len = sizeof(u8) }, 352 [OVS_KEY_ATTR_CT_STATE] = { .len = sizeof(u32) },
353 [OVS_KEY_ATTR_CT_ZONE] = { .len = sizeof(u16) }, 353 [OVS_KEY_ATTR_CT_ZONE] = { .len = sizeof(u16) },
354 [OVS_KEY_ATTR_CT_MARK] = { .len = sizeof(u32) }, 354 [OVS_KEY_ATTR_CT_MARK] = { .len = sizeof(u32) },
355 [OVS_KEY_ATTR_CT_LABEL] = { .len = sizeof(struct ovs_key_ct_label) }, 355 [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
356}; 356};
357 357
358static bool check_attr_len(unsigned int attr_len, unsigned int expected_len) 358static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
@@ -814,7 +814,13 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
814 814
815 if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) && 815 if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
816 ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) { 816 ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
817 u8 ct_state = nla_get_u8(a[OVS_KEY_ATTR_CT_STATE]); 817 u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);
818
819 if (!is_mask && !ovs_ct_state_supported(ct_state)) {
820 OVS_NLERR(log, "ct_state flags %08x unsupported",
821 ct_state);
822 return -EINVAL;
823 }
818 824
819 SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask); 825 SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask);
820 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE); 826 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
@@ -833,14 +839,14 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
833 SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask); 839 SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
834 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK); 840 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
835 } 841 }
836 if (*attrs & (1 << OVS_KEY_ATTR_CT_LABEL) && 842 if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) &&
837 ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABEL)) { 843 ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) {
838 const struct ovs_key_ct_label *cl; 844 const struct ovs_key_ct_labels *cl;
839 845
840 cl = nla_data(a[OVS_KEY_ATTR_CT_LABEL]); 846 cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
841 SW_FLOW_KEY_MEMCPY(match, ct.label, cl->ct_label, 847 SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
842 sizeof(*cl), is_mask); 848 sizeof(*cl), is_mask);
843 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABEL); 849 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
844 } 850 }
845 return 0; 851 return 0;
846} 852}
@@ -1973,7 +1979,7 @@ static int validate_set(const struct nlattr *a,
1973 case OVS_KEY_ATTR_PRIORITY: 1979 case OVS_KEY_ATTR_PRIORITY:
1974 case OVS_KEY_ATTR_SKB_MARK: 1980 case OVS_KEY_ATTR_SKB_MARK:
1975 case OVS_KEY_ATTR_CT_MARK: 1981 case OVS_KEY_ATTR_CT_MARK:
1976 case OVS_KEY_ATTR_CT_LABEL: 1982 case OVS_KEY_ATTR_CT_LABELS:
1977 case OVS_KEY_ATTR_ETHERNET: 1983 case OVS_KEY_ATTR_ETHERNET:
1978 break; 1984 break;
1979 1985
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index f2ea83ba4763..c7f74aab34b9 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -93,7 +93,8 @@ struct sw_flow *ovs_flow_alloc(void)
93 93
94 /* Initialize the default stat node. */ 94 /* Initialize the default stat node. */
95 stats = kmem_cache_alloc_node(flow_stats_cache, 95 stats = kmem_cache_alloc_node(flow_stats_cache,
96 GFP_KERNEL | __GFP_ZERO, 0); 96 GFP_KERNEL | __GFP_ZERO,
97 node_online(0) ? 0 : NUMA_NO_NODE);
97 if (!stats) 98 if (!stats)
98 goto err; 99 goto err;
99 100
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index dc81dc619aa2..12a36ac21eda 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -280,35 +280,19 @@ void ovs_vport_del(struct vport *vport)
280 */ 280 */
281void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats) 281void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
282{ 282{
283 struct net_device *dev = vport->dev; 283 const struct rtnl_link_stats64 *dev_stats;
284 int i; 284 struct rtnl_link_stats64 temp;
285 285
286 memset(stats, 0, sizeof(*stats)); 286 dev_stats = dev_get_stats(vport->dev, &temp);
287 stats->rx_errors = dev->stats.rx_errors; 287 stats->rx_errors = dev_stats->rx_errors;
288 stats->tx_errors = dev->stats.tx_errors; 288 stats->tx_errors = dev_stats->tx_errors;
289 stats->tx_dropped = dev->stats.tx_dropped; 289 stats->tx_dropped = dev_stats->tx_dropped;
290 stats->rx_dropped = dev->stats.rx_dropped; 290 stats->rx_dropped = dev_stats->rx_dropped;
291 291
292 stats->rx_dropped += atomic_long_read(&dev->rx_dropped); 292 stats->rx_bytes = dev_stats->rx_bytes;
293 stats->tx_dropped += atomic_long_read(&dev->tx_dropped); 293 stats->rx_packets = dev_stats->rx_packets;
294 294 stats->tx_bytes = dev_stats->tx_bytes;
295 for_each_possible_cpu(i) { 295 stats->tx_packets = dev_stats->tx_packets;
296 const struct pcpu_sw_netstats *percpu_stats;
297 struct pcpu_sw_netstats local_stats;
298 unsigned int start;
299
300 percpu_stats = per_cpu_ptr(dev->tstats, i);
301
302 do {
303 start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
304 local_stats = *percpu_stats;
305 } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
306
307 stats->rx_bytes += local_stats.rx_bytes;
308 stats->rx_packets += local_stats.rx_packets;
309 stats->tx_bytes += local_stats.tx_bytes;
310 stats->tx_packets += local_stats.tx_packets;
311 }
312} 296}
313 297
314/** 298/**
@@ -460,6 +444,15 @@ int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
460 444
461 OVS_CB(skb)->input_vport = vport; 445 OVS_CB(skb)->input_vport = vport;
462 OVS_CB(skb)->mru = 0; 446 OVS_CB(skb)->mru = 0;
447 if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
448 u32 mark;
449
450 mark = skb->mark;
451 skb_scrub_packet(skb, true);
452 skb->mark = mark;
453 tun_info = NULL;
454 }
455
463 /* Extract flow from 'skb' into 'key'. */ 456 /* Extract flow from 'skb' into 'key'. */
464 error = ovs_flow_key_extract(tun_info, skb, &key); 457 error = ovs_flow_key_extract(tun_info, skb, &key);
465 if (unlikely(error)) { 458 if (unlikely(error)) {
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 2d1be4a760fd..32fcdecdb9e2 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -31,13 +31,17 @@
31 31
32#define MIRRED_TAB_MASK 7 32#define MIRRED_TAB_MASK 7
33static LIST_HEAD(mirred_list); 33static LIST_HEAD(mirred_list);
34static DEFINE_SPINLOCK(mirred_list_lock);
34 35
35static void tcf_mirred_release(struct tc_action *a, int bind) 36static void tcf_mirred_release(struct tc_action *a, int bind)
36{ 37{
37 struct tcf_mirred *m = to_mirred(a); 38 struct tcf_mirred *m = to_mirred(a);
38 struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1); 39 struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
39 40
41 /* We could be called either in a RCU callback or with RTNL lock held. */
42 spin_lock_bh(&mirred_list_lock);
40 list_del(&m->tcfm_list); 43 list_del(&m->tcfm_list);
44 spin_unlock_bh(&mirred_list_lock);
41 if (dev) 45 if (dev)
42 dev_put(dev); 46 dev_put(dev);
43} 47}
@@ -103,10 +107,10 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
103 } else { 107 } else {
104 if (bind) 108 if (bind)
105 return 0; 109 return 0;
106 if (!ovr) { 110
107 tcf_hash_release(a, bind); 111 tcf_hash_release(a, bind);
112 if (!ovr)
108 return -EEXIST; 113 return -EEXIST;
109 }
110 } 114 }
111 m = to_mirred(a); 115 m = to_mirred(a);
112 116
@@ -123,7 +127,9 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
123 } 127 }
124 128
125 if (ret == ACT_P_CREATED) { 129 if (ret == ACT_P_CREATED) {
130 spin_lock_bh(&mirred_list_lock);
126 list_add(&m->tcfm_list, &mirred_list); 131 list_add(&m->tcfm_list, &mirred_list);
132 spin_unlock_bh(&mirred_list_lock);
127 tcf_hash_insert(a); 133 tcf_hash_insert(a);
128 } 134 }
129 135
@@ -173,6 +179,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
173 179
174 skb2->skb_iif = skb->dev->ifindex; 180 skb2->skb_iif = skb->dev->ifindex;
175 skb2->dev = dev; 181 skb2->dev = dev;
182 skb_sender_cpu_clear(skb2);
176 err = dev_queue_xmit(skb2); 183 err = dev_queue_xmit(skb2);
177 184
178 if (err) { 185 if (err) {
@@ -221,7 +228,8 @@ static int mirred_device_event(struct notifier_block *unused,
221 struct tcf_mirred *m; 228 struct tcf_mirred *m;
222 229
223 ASSERT_RTNL(); 230 ASSERT_RTNL();
224 if (event == NETDEV_UNREGISTER) 231 if (event == NETDEV_UNREGISTER) {
232 spin_lock_bh(&mirred_list_lock);
225 list_for_each_entry(m, &mirred_list, tcfm_list) { 233 list_for_each_entry(m, &mirred_list, tcfm_list) {
226 if (rcu_access_pointer(m->tcfm_dev) == dev) { 234 if (rcu_access_pointer(m->tcfm_dev) == dev) {
227 dev_put(dev); 235 dev_put(dev);
@@ -231,6 +239,8 @@ static int mirred_device_event(struct notifier_block *unused,
231 RCU_INIT_POINTER(m->tcfm_dev, NULL); 239 RCU_INIT_POINTER(m->tcfm_dev, NULL);
232 } 240 }
233 } 241 }
242 spin_unlock_bh(&mirred_list_lock);
243 }
234 244
235 return NOTIFY_DONE; 245 return NOTIFY_DONE;
236} 246}
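
The act_mirred changes guard mirred_list with mirred_list_lock because tcf_mirred_release() can run from an RCU callback that does not hold RTNL. A small pthread sketch of mutating a singly linked list only under its own lock, with invented node/list names; build with -pthread:

    #include <pthread.h>
    #include <stdio.h>

    /* Sketch of the act_mirred fix: the shared list is mutated only under its
     * own lock, since removal may run in a context without the usual lock. */
    struct node {
        struct node *next;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *list_head;

    static void list_add_locked(struct node *n)
    {
        pthread_mutex_lock(&list_lock);
        n->next = list_head;
        list_head = n;
        pthread_mutex_unlock(&list_lock);
    }

    static void list_del_locked(struct node *n)
    {
        struct node **p;

        pthread_mutex_lock(&list_lock);
        for (p = &list_head; *p; p = &(*p)->next) {
            if (*p == n) {
                *p = n->next;
                break;
            }
        }
        pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
        struct node a, b;

        list_add_locked(&a);
        list_add_locked(&b);
        list_del_locked(&a);
        printf("%d\n", list_head == &b && b.next == NULL);   /* prints 1 */
        return 0;
    }
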
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 9d15cb6b8cb1..86b04e31e60b 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -368,6 +368,15 @@ static unsigned int hhf_drop(struct Qdisc *sch)
368 return bucket - q->buckets; 368 return bucket - q->buckets;
369} 369}
370 370
371static unsigned int hhf_qdisc_drop(struct Qdisc *sch)
372{
373 unsigned int prev_backlog;
374
375 prev_backlog = sch->qstats.backlog;
376 hhf_drop(sch);
377 return prev_backlog - sch->qstats.backlog;
378}
379
371static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch) 380static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
372{ 381{
373 struct hhf_sched_data *q = qdisc_priv(sch); 382 struct hhf_sched_data *q = qdisc_priv(sch);
@@ -696,7 +705,7 @@ static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
696 .enqueue = hhf_enqueue, 705 .enqueue = hhf_enqueue,
697 .dequeue = hhf_dequeue, 706 .dequeue = hhf_dequeue,
698 .peek = qdisc_peek_dequeued, 707 .peek = qdisc_peek_dequeued,
699 .drop = hhf_drop, 708 .drop = hhf_qdisc_drop,
700 .init = hhf_init, 709 .init = hhf_init,
701 .reset = hhf_reset, 710 .reset = hhf_reset,
702 .destroy = hhf_destroy, 711 .destroy = hhf_destroy,
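
hhf_qdisc_drop() wraps hhf_drop() so the qdisc ->drop() hook reports how many bytes left the backlog: snapshot the counter, let the worker shrink it, return the difference. A toy sketch of that before/after delta pattern, with a fixed 100-byte stand-in for the dropped packet's length:

    #include <stdio.h>

    struct qstats {
        unsigned int backlog;   /* bytes currently queued */
    };

    static void drop_one(struct qstats *q)
    {
        q->backlog -= 100;      /* stand-in for the dropped packet's size */
    }

    /* Wrapper in the style of hhf_qdisc_drop(): snapshot the backlog, let the
     * worker shrink it, and report how many bytes went away. */
    static unsigned int drop_and_report(struct qstats *q)
    {
        unsigned int prev = q->backlog;

        drop_one(q);
        return prev - q->backlog;
    }

    int main(void)
    {
        struct qstats q = { .backlog = 500 };

        printf("%u\n", drop_and_report(&q));   /* prints 100 */
        return 0;
    }
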
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index fda38f830a10..77f5d17e2612 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -16,6 +16,7 @@
16#include <linux/notifier.h> 16#include <linux/notifier.h>
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/if_bridge.h> 18#include <linux/if_bridge.h>
19#include <linux/if_vlan.h>
19#include <net/ip_fib.h> 20#include <net/ip_fib.h>
20#include <net/switchdev.h> 21#include <net/switchdev.h>
21 22
@@ -634,6 +635,8 @@ static int switchdev_port_br_afspec(struct net_device *dev,
634 if (nla_len(attr) != sizeof(struct bridge_vlan_info)) 635 if (nla_len(attr) != sizeof(struct bridge_vlan_info))
635 return -EINVAL; 636 return -EINVAL;
636 vinfo = nla_data(attr); 637 vinfo = nla_data(attr);
638 if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
639 return -EINVAL;
637 vlan->flags = vinfo->flags; 640 vlan->flags = vinfo->flags;
638 if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) { 641 if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
639 if (vlan->vid_begin) 642 if (vlan->vid_begin)
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index a82c5848d4bc..5351a3f97e8e 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -357,7 +357,7 @@ static inline u32 msg_importance(struct tipc_msg *m)
357 if (likely((usr <= TIPC_CRITICAL_IMPORTANCE) && !msg_errcode(m))) 357 if (likely((usr <= TIPC_CRITICAL_IMPORTANCE) && !msg_errcode(m)))
358 return usr; 358 return usr;
359 if ((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER)) 359 if ((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER))
360 return msg_bits(m, 5, 13, 0x7); 360 return msg_bits(m, 9, 0, 0x7);
361 return TIPC_SYSTEM_IMPORTANCE; 361 return TIPC_SYSTEM_IMPORTANCE;
362} 362}
363 363
@@ -366,7 +366,7 @@ static inline void msg_set_importance(struct tipc_msg *m, u32 i)
366 int usr = msg_user(m); 366 int usr = msg_user(m);
367 367
368 if (likely((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER))) 368 if (likely((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER)))
369 msg_set_bits(m, 5, 13, 0x7, i); 369 msg_set_bits(m, 9, 0, 0x7, i);
370 else if (i < TIPC_SYSTEM_IMPORTANCE) 370 else if (i < TIPC_SYSTEM_IMPORTANCE)
371 msg_set_user(m, i); 371 msg_set_user(m, i);
372 else 372 else
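
The tipc/msg.h fix relocates the importance field that msg_bits()/msg_set_bits() operate on from word 5, bits 13..15 to word 9, bits 0..2. A generic sketch of such word/position/mask helpers over an array of 32-bit header words, kept in host byte order for simplicity (the real TIPC header is big-endian):

    #include <stdint.h>
    #include <stdio.h>

    /* Generic header bit-field helpers in the style of TIPC's msg_bits() and
     * msg_set_bits(); host byte order here, unlike the on-wire header. */
    static uint32_t hdr_bits(const uint32_t *hdr, int word, int pos, uint32_t mask)
    {
        return (hdr[word] >> pos) & mask;
    }

    static void hdr_set_bits(uint32_t *hdr, int word, int pos, uint32_t mask,
                             uint32_t val)
    {
        hdr[word] &= ~(mask << pos);          /* clear the old field */
        hdr[word] |= (val & mask) << pos;     /* write the new value */
    }

    int main(void)
    {
        uint32_t hdr[11] = { 0 };

        /* the patch moves importance to word 9, bits 0..2 (mask 0x7) */
        hdr_set_bits(hdr, 9, 0, 0x7, 5);
        printf("%u\n", hdr_bits(hdr, 9, 0, 0x7));   /* prints 5 */
        return 0;
    }
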
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 703875fd6cde..2c32a83037a3 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1116,7 +1116,7 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1116 } 1116 }
1117 1117
1118 /* Ignore duplicate packets */ 1118 /* Ignore duplicate packets */
1119 if (less(oseqno, rcv_nxt)) 1119 if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
1120 return true; 1120 return true;
1121 1121
1122 /* Initiate or update failover mode if applicable */ 1122 /* Initiate or update failover mode if applicable */
@@ -1146,8 +1146,8 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1146 if (!pl || !tipc_link_is_up(pl)) 1146 if (!pl || !tipc_link_is_up(pl))
1147 return true; 1147 return true;
1148 1148
1149 /* Initiate or update synch mode if applicable */ 1149 /* Initiate synch mode if applicable */
1150 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) { 1150 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
1151 syncpt = iseqno + exp_pkts - 1; 1151 syncpt = iseqno + exp_pkts - 1;
1152 if (!tipc_link_is_up(l)) { 1152 if (!tipc_link_is_up(l)) {
1153 tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT); 1153 tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ef31b40ad550..94f658235fb4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2064,6 +2064,11 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2064 goto out; 2064 goto out;
2065 } 2065 }
2066 2066
2067 if (flags & MSG_PEEK)
2068 skip = sk_peek_offset(sk, flags);
2069 else
2070 skip = 0;
2071
2067 do { 2072 do {
2068 int chunk; 2073 int chunk;
2069 struct sk_buff *skb, *last; 2074 struct sk_buff *skb, *last;
@@ -2112,7 +2117,6 @@ unlock:
2112 break; 2117 break;
2113 } 2118 }
2114 2119
2115 skip = sk_peek_offset(sk, flags);
2116 while (skip >= unix_skb_len(skb)) { 2120 while (skip >= unix_skb_len(skb)) {
2117 skip -= unix_skb_len(skb); 2121 skip -= unix_skb_len(skb);
2118 last = skb; 2122 last = skb;
@@ -2179,14 +2183,12 @@ unlock:
2179 if (UNIXCB(skb).fp) 2183 if (UNIXCB(skb).fp)
2180 scm.fp = scm_fp_dup(UNIXCB(skb).fp); 2184 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2181 2185
2182 if (skip) { 2186 sk_peek_offset_fwd(sk, chunk);
2183 sk_peek_offset_fwd(sk, chunk);
2184 skip -= chunk;
2185 }
2186 2187
2187 if (UNIXCB(skb).fp) 2188 if (UNIXCB(skb).fp)
2188 break; 2189 break;
2189 2190
2191 skip = 0;
2190 last = skb; 2192 last = skb;
2191 last_len = skb->len; 2193 last_len = skb->len;
2192 unix_state_lock(sk); 2194 unix_state_lock(sk);
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 39eac1fd5706..addf060399e0 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -134,8 +134,10 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
134 kdebug("- %u", key->serial); 134 kdebug("- %u", key->serial);
135 key_check(key); 135 key_check(key);
136 136
137 /* Throw away the key data */ 137 /* Throw away the key data if the key is instantiated */
138 if (key->type->destroy) 138 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
139 !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
140 key->type->destroy)
139 key->type->destroy(key); 141 key->type->destroy(key);
140 142
141 security_key_free(key); 143 security_key_free(key);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 486ef6fa393b..0d6253124278 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -440,6 +440,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
440 440
441 kenter(""); 441 kenter("");
442 442
443 if (ctx->index_key.type == &key_type_keyring)
444 return ERR_PTR(-EPERM);
445
443 user = key_user_lookup(current_fsuid()); 446 user = key_user_lookup(current_fsuid());
444 if (!user) 447 if (!user)
445 return ERR_PTR(-ENOMEM); 448 return ERR_PTR(-ENOMEM);
diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c
index 4449d1a99089..2433f7c81472 100644
--- a/sound/hda/ext/hdac_ext_bus.c
+++ b/sound/hda/ext/hdac_ext_bus.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/io.h>
22#include <sound/hdaudio_ext.h> 23#include <sound/hdaudio_ext.h>
23 24
24MODULE_DESCRIPTION("HDA extended core"); 25MODULE_DESCRIPTION("HDA extended core");
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 37f43a1b34ef..a249d5486889 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -3367,10 +3367,8 @@ int snd_hda_codec_build_pcms(struct hda_codec *codec)
3367 int dev, err; 3367 int dev, err;
3368 3368
3369 err = snd_hda_codec_parse_pcms(codec); 3369 err = snd_hda_codec_parse_pcms(codec);
3370 if (err < 0) { 3370 if (err < 0)
3371 snd_hda_codec_reset(codec);
3372 return err; 3371 return err;
3373 }
3374 3372
3375 /* attach a new PCM streams */ 3373 /* attach a new PCM streams */
3376 list_for_each_entry(cpcm, &codec->pcm_list_head, list) { 3374 list_for_each_entry(cpcm, &codec->pcm_list_head, list) {
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index ca03c40609fc..2f0ec7c45fc7 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -819,6 +819,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
 	SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
 	SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
+	SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index 3c2f0f8d6266..f823eb502367 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -50,24 +50,24 @@ struct rt298_priv {
 };
 
 static struct reg_default rt298_index_def[] = {
-	{ 0x01, 0xaaaa },
-	{ 0x02, 0x8aaa },
+	{ 0x01, 0xa5a8 },
+	{ 0x02, 0x8e95 },
 	{ 0x03, 0x0002 },
-	{ 0x04, 0xaf01 },
-	{ 0x08, 0x000d },
-	{ 0x09, 0xd810 },
-	{ 0x0a, 0x0120 },
+	{ 0x04, 0xaf67 },
+	{ 0x08, 0x200f },
+	{ 0x09, 0xd010 },
+	{ 0x0a, 0x0100 },
 	{ 0x0b, 0x0000 },
 	{ 0x0d, 0x2800 },
-	{ 0x0f, 0x0000 },
-	{ 0x19, 0x0a17 },
+	{ 0x0f, 0x0022 },
+	{ 0x19, 0x0217 },
 	{ 0x20, 0x0020 },
 	{ 0x33, 0x0208 },
 	{ 0x46, 0x0300 },
-	{ 0x49, 0x0004 },
-	{ 0x4f, 0x50e9 },
-	{ 0x50, 0x2000 },
-	{ 0x63, 0x2902 },
+	{ 0x49, 0x4004 },
+	{ 0x4f, 0x50c9 },
+	{ 0x50, 0x3000 },
+	{ 0x63, 0x1b02 },
 	{ 0x67, 0x1111 },
 	{ 0x68, 0x1016 },
 	{ 0x69, 0x273f },
@@ -1214,7 +1214,7 @@ static int rt298_i2c_probe(struct i2c_client *i2c,
 	mdelay(10);
 
 	if (!rt298->pdata.gpio2_en)
-		regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x4000);
+		regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x40);
 	else
 		regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0);
 
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 2fbc6ef8cbdb..39ebd7bf4f53 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3808,6 +3808,8 @@ static int wm8962_runtime_resume(struct device *dev)
 
 	wm8962_reset(wm8962);
 
+	regcache_mark_dirty(wm8962->regmap);
+
 	/* SYSCLK defaults to on; make sure it is off so we can safely
 	 * write to registers if the device is declocked.
 	 */
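
The wm8962 hunk marks the register cache dirty right after the device reset so that a later cache sync replays the cached values instead of assuming hardware still matches the cache. Below is a rough userspace model of that mark-dirty/sync idea; the reg_cache type and helpers are invented for illustration and are not the regmap API.

#include <stdbool.h>
#include <stdio.h>

#define NREGS 4

struct reg_cache {
	unsigned int val[NREGS];
	bool dirty[NREGS];
};

static void cache_write(struct reg_cache *c, unsigned int reg, unsigned int val)
{
	c->val[reg] = val;
	c->dirty[reg] = true;
}

static void cache_mark_dirty(struct reg_cache *c)
{
	for (int i = 0; i < NREGS; i++)
		c->dirty[i] = true;	/* everything must be replayed */
}

static void cache_sync(struct reg_cache *c)
{
	for (int i = 0; i < NREGS; i++)
		if (c->dirty[i]) {
			printf("write reg %d = 0x%x\n", i, c->val[i]);
			c->dirty[i] = false;
		}
}

int main(void)
{
	struct reg_cache c = { 0 };

	cache_write(&c, 2, 0x1234);
	cache_sync(&c);		/* flushes only reg 2 */

	/* a device reset happens here: hardware is back to defaults */
	cache_mark_dirty(&c);
	cache_sync(&c);		/* replays every cached register */
	return 0;
}
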
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 100d92b5b77e..05977ae1ff2a 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -207,6 +207,34 @@ int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
 EXPORT_SYMBOL_GPL(snd_soc_info_volsw);
 
 /**
+ * snd_soc_info_volsw_sx - Mixer info callback for SX TLV controls
+ * @kcontrol: mixer control
+ * @uinfo: control element information
+ *
+ * Callback to provide information about a single mixer control, or a double
+ * mixer control that spans 2 registers of the SX TLV type. SX TLV controls
+ * have a range that represents both positive and negative values either side
+ * of zero but without a sign bit.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol,
+			  struct snd_ctl_elem_info *uinfo)
+{
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+
+	snd_soc_info_volsw(kcontrol, uinfo);
+	/* Max represents the number of levels in an SX control not the
+	 * maximum value, so add the minimum value back on
+	 */
+	uinfo->value.integer.max += mc->min;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_info_volsw_sx);
+
+/**
  * snd_soc_get_volsw - single mixer get callback
  * @kcontrol: mixer control
  * @ucontrol: control element information
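
For the new snd_soc_info_volsw_sx() above, .max counts the number of levels rather than naming the largest value, so the minimum is added back when reporting the range. A trivial standalone check of that arithmetic with made-up numbers (not taken from any real codec driver):

#include <stdio.h>

int main(void)
{
	int min = -8;		/* hypothetical mc->min for an SX control */
	int levels = 16;	/* hypothetical mc->max: number of levels */

	int max = levels + min;	/* mirrors uinfo->value.integer.max += mc->min */

	printf("levels=%d min=%d -> reported max=%d\n", levels, min, max);
	return 0;
}
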
diff --git a/tools/power/cpupower/debug/i386/dump_psb.c b/tools/power/cpupower/debug/i386/dump_psb.c
index 8d6a47514253..2c768cf70128 100644
--- a/tools/power/cpupower/debug/i386/dump_psb.c
+++ b/tools/power/cpupower/debug/i386/dump_psb.c
@@ -134,7 +134,7 @@ next_one:
 }
 
 static struct option info_opts[] = {
-	{.name = "numpst", .has_arg=no_argument, .flag=NULL, .val='n'},
+	{"numpst", no_argument, NULL, 'n'},
 };
 
 void print_help(void)
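
This and the following cpupower hunks rewrite the struct option tables from designated initializers to positional ones; both forms fill in the same four members (name, has_arg, flag, val). A minimal getopt_long(3) program showing the two equivalent spellings side by side (the option names here are invented):

#include <getopt.h>
#include <stdio.h>

static struct option demo_opts[] = {
	{"verbose", no_argument,       NULL, 'v'},	/* positional form */
	{ .name = "output", .has_arg = required_argument,
	  .flag = NULL, .val = 'o'},			/* designated form */
	{ },
};

int main(int argc, char **argv)
{
	int c;

	while ((c = getopt_long(argc, argv, "vo:", demo_opts, NULL)) != -1) {
		switch (c) {
		case 'v':
			printf("verbose\n");
			break;
		case 'o':
			printf("output=%s\n", optarg);
			break;
		default:
			return 1;
		}
	}
	return 0;
}
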
diff --git a/tools/power/cpupower/man/cpupower-idle-set.1 b/tools/power/cpupower/man/cpupower-idle-set.1
index 3e6799d7a79f..580c4e3ea92a 100644
--- a/tools/power/cpupower/man/cpupower-idle-set.1
+++ b/tools/power/cpupower/man/cpupower-idle-set.1
@@ -20,7 +20,9 @@ Disable a specific processor sleep state.
 Enable a specific processor sleep state.
 .TP
 \fB\-D\fR \fB\-\-disable-by-latency\fR <LATENCY>
-Disable all idle states with a equal or higher latency than <LATENCY>
+Disable all idle states with a equal or higher latency than <LATENCY>.
+
+Enable all idle states with a latency lower than <LATENCY>.
 .TP
 \fB\-E\fR \fB\-\-enable-all\fR
 Enable all idle states if not enabled already.
diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c
index b4b90a97662c..0e6764330241 100644
--- a/tools/power/cpupower/utils/cpufreq-info.c
+++ b/tools/power/cpupower/utils/cpufreq-info.c
@@ -536,21 +536,21 @@ static int get_latency(unsigned int cpu, unsigned int human)
 }
 
 static struct option info_opts[] = {
-	{ .name = "debug", .has_arg = no_argument, .flag = NULL, .val = 'e'},
-	{ .name = "boost", .has_arg = no_argument, .flag = NULL, .val = 'b'},
-	{ .name = "freq", .has_arg = no_argument, .flag = NULL, .val = 'f'},
-	{ .name = "hwfreq", .has_arg = no_argument, .flag = NULL, .val = 'w'},
-	{ .name = "hwlimits", .has_arg = no_argument, .flag = NULL, .val = 'l'},
-	{ .name = "driver", .has_arg = no_argument, .flag = NULL, .val = 'd'},
-	{ .name = "policy", .has_arg = no_argument, .flag = NULL, .val = 'p'},
-	{ .name = "governors", .has_arg = no_argument, .flag = NULL, .val = 'g'},
-	{ .name = "related-cpus", .has_arg = no_argument, .flag = NULL, .val = 'r'},
-	{ .name = "affected-cpus",.has_arg = no_argument, .flag = NULL, .val = 'a'},
-	{ .name = "stats", .has_arg = no_argument, .flag = NULL, .val = 's'},
-	{ .name = "latency", .has_arg = no_argument, .flag = NULL, .val = 'y'},
-	{ .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'},
-	{ .name = "human", .has_arg = no_argument, .flag = NULL, .val = 'm'},
-	{ .name = "no-rounding", .has_arg = no_argument, .flag = NULL, .val = 'n'},
+	{"debug", no_argument, NULL, 'e'},
+	{"boost", no_argument, NULL, 'b'},
+	{"freq", no_argument, NULL, 'f'},
+	{"hwfreq", no_argument, NULL, 'w'},
+	{"hwlimits", no_argument, NULL, 'l'},
+	{"driver", no_argument, NULL, 'd'},
+	{"policy", no_argument, NULL, 'p'},
+	{"governors", no_argument, NULL, 'g'},
+	{"related-cpus", no_argument, NULL, 'r'},
+	{"affected-cpus", no_argument, NULL, 'a'},
+	{"stats", no_argument, NULL, 's'},
+	{"latency", no_argument, NULL, 'y'},
+	{"proc", no_argument, NULL, 'o'},
+	{"human", no_argument, NULL, 'm'},
+	{"no-rounding", no_argument, NULL, 'n'},
 	{ },
 };
 
diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
index 4e213576381e..0fbd1a22c0a9 100644
--- a/tools/power/cpupower/utils/cpufreq-set.c
+++ b/tools/power/cpupower/utils/cpufreq-set.c
@@ -22,11 +22,11 @@
 #define NORM_FREQ_LEN 32
 
 static struct option set_opts[] = {
-	{ .name = "min", .has_arg = required_argument, .flag = NULL, .val = 'd'},
-	{ .name = "max", .has_arg = required_argument, .flag = NULL, .val = 'u'},
-	{ .name = "governor", .has_arg = required_argument, .flag = NULL, .val = 'g'},
-	{ .name = "freq", .has_arg = required_argument, .flag = NULL, .val = 'f'},
-	{ .name = "related", .has_arg = no_argument, .flag = NULL, .val='r'},
+	{"min", required_argument, NULL, 'd'},
+	{"max", required_argument, NULL, 'u'},
+	{"governor", required_argument, NULL, 'g'},
+	{"freq", required_argument, NULL, 'f'},
+	{"related", no_argument, NULL, 'r'},
 	{ },
 };
 
diff --git a/tools/power/cpupower/utils/cpuidle-info.c b/tools/power/cpupower/utils/cpuidle-info.c
index 75e66de7e7a7..750c1d82c3f7 100644
--- a/tools/power/cpupower/utils/cpuidle-info.c
+++ b/tools/power/cpupower/utils/cpuidle-info.c
@@ -126,8 +126,8 @@ static void proc_cpuidle_cpu_output(unsigned int cpu)
 }
 
 static struct option info_opts[] = {
-	{ .name = "silent", .has_arg = no_argument, .flag = NULL, .val = 's'},
-	{ .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'},
+	{"silent", no_argument, NULL, 's'},
+	{"proc", no_argument, NULL, 'o'},
 	{ },
 };
 
diff --git a/tools/power/cpupower/utils/cpuidle-set.c b/tools/power/cpupower/utils/cpuidle-set.c
index d45d8d775c02..d6b6ae44b8c2 100644
--- a/tools/power/cpupower/utils/cpuidle-set.c
+++ b/tools/power/cpupower/utils/cpuidle-set.c
@@ -13,15 +13,11 @@
 #include "helpers/sysfs.h"
 
 static struct option info_opts[] = {
-	{ .name = "disable",
-		.has_arg = required_argument, .flag = NULL, .val = 'd'},
-	{ .name = "enable",
-		.has_arg = required_argument, .flag = NULL, .val = 'e'},
-	{ .name = "disable-by-latency",
-		.has_arg = required_argument, .flag = NULL, .val = 'D'},
-	{ .name = "enable-all",
-		.has_arg = no_argument, .flag = NULL, .val = 'E'},
+	{"disable", required_argument, NULL, 'd'},
+	{"enable", required_argument, NULL, 'e'},
+	{"disable-by-latency", required_argument, NULL, 'D'},
+	{"enable-all", no_argument, NULL, 'E'},
 	{ },
 };
 
 
@@ -148,14 +144,21 @@ int cmd_idle_set(int argc, char **argv)
 						(cpu, idlestate);
 			state_latency = sysfs_get_idlestate_latency
 						(cpu, idlestate);
-			printf("CPU: %u - idlestate %u - state_latency: %llu - latency: %llu\n",
-				cpu, idlestate, state_latency, latency);
-			if (disabled == 1 || latency > state_latency)
+			if (disabled == 1) {
+				if (latency > state_latency){
+					ret = sysfs_idlestate_disable
+						(cpu, idlestate, 0);
+					if (ret == 0)
+		printf(_("Idlestate %u enabled on CPU %u\n"), idlestate, cpu);
+				}
 				continue;
-			ret = sysfs_idlestate_disable
-						(cpu, idlestate, 1);
-			if (ret == 0)
+			}
+			if (latency <= state_latency){
+				ret = sysfs_idlestate_disable
+						(cpu, idlestate, 1);
+				if (ret == 0)
 		printf(_("Idlestate %u disabled on CPU %u\n"), idlestate, cpu);
+			}
 		}
 		break;
 	case 'E':
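
The reworked --disable-by-latency handling above disables states whose exit latency is at or above the requested threshold and re-enables previously disabled states that fall below it. A standalone decision helper mirroring that logic on invented state data:

#include <stdbool.h>
#include <stdio.h>

/* Return +1 to disable, -1 to enable, 0 to leave the state alone. */
static int idle_set_action(bool currently_disabled,
			   unsigned long long threshold,
			   unsigned long long state_latency)
{
	if (currently_disabled)
		return threshold > state_latency ? -1 : 0;
	return threshold <= state_latency ? +1 : 0;
}

int main(void)
{
	struct { bool disabled; unsigned long long lat; } states[] = {
		{ false, 1 }, { false, 50 }, { true, 10 }, { true, 200 },
	};
	unsigned long long threshold = 100;

	for (unsigned i = 0; i < sizeof(states) / sizeof(states[0]); i++) {
		int act = idle_set_action(states[i].disabled, threshold,
					  states[i].lat);
		printf("state %u (latency %llu): %s\n", i, states[i].lat,
		       act > 0 ? "disable" : act < 0 ? "enable" : "keep");
	}
	return 0;
}
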
diff --git a/tools/power/cpupower/utils/cpupower-info.c b/tools/power/cpupower/utils/cpupower-info.c
index 136d979e9586..10299f2e9d2a 100644
--- a/tools/power/cpupower/utils/cpupower-info.c
+++ b/tools/power/cpupower/utils/cpupower-info.c
@@ -17,8 +17,8 @@
 #include "helpers/sysfs.h"
 
 static struct option set_opts[] = {
-	{ .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'},
+	{"perf-bias", optional_argument, NULL, 'b'},
 	{ },
 };
 
 static void print_wrong_arg_exit(void)
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
index 573c75f8e3f5..3e6f374f8dd7 100644
--- a/tools/power/cpupower/utils/cpupower-set.c
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -18,7 +18,7 @@
 #include "helpers/bitmask.h"
 
 static struct option set_opts[] = {
-	{ .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'},
+	{"perf-bias", required_argument, NULL, 'b'},
 	{ },
 };
 
diff --git a/tools/power/cpupower/utils/helpers/topology.c b/tools/power/cpupower/utils/helpers/topology.c
index cea398c176e7..9cbb7fd75171 100644
--- a/tools/power/cpupower/utils/helpers/topology.c
+++ b/tools/power/cpupower/utils/helpers/topology.c
@@ -73,18 +73,22 @@ int get_cpu_topology(struct cpupower_topology *cpu_top)
 	for (cpu = 0; cpu < cpus; cpu++) {
 		cpu_top->core_info[cpu].cpu = cpu;
 		cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu);
-		if (!cpu_top->core_info[cpu].is_online)
-			continue;
 		if(sysfs_topology_read_file(
 			cpu,
 			"physical_package_id",
-			&(cpu_top->core_info[cpu].pkg)) < 0)
-			return -1;
+			&(cpu_top->core_info[cpu].pkg)) < 0) {
+			cpu_top->core_info[cpu].pkg = -1;
+			cpu_top->core_info[cpu].core = -1;
+			continue;
+		}
 		if(sysfs_topology_read_file(
 			cpu,
 			"core_id",
-			&(cpu_top->core_info[cpu].core)) < 0)
-			return -1;
+			&(cpu_top->core_info[cpu].core)) < 0) {
+			cpu_top->core_info[cpu].pkg = -1;
+			cpu_top->core_info[cpu].core = -1;
+			continue;
+		}
 	}
 
 	qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info),
@@ -95,12 +99,15 @@ int get_cpu_topology(struct cpupower_topology *cpu_top)
 	   done by pkg value. */
 	last_pkg = cpu_top->core_info[0].pkg;
 	for(cpu = 1; cpu < cpus; cpu++) {
-		if(cpu_top->core_info[cpu].pkg != last_pkg) {
+		if (cpu_top->core_info[cpu].pkg != last_pkg &&
+		    cpu_top->core_info[cpu].pkg != -1) {
+
 			last_pkg = cpu_top->core_info[cpu].pkg;
 			cpu_top->pkgs++;
 		}
 	}
-	cpu_top->pkgs++;
+	if (!cpu_top->core_info[0].pkg == -1)
+		cpu_top->pkgs++;
 
 	/* Intel's cores count is not consecutively numbered, there may
 	 * be a core_id of 3, but none of 2. Assume there always is 0
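
The topology hunks above stop treating unreadable topology files (typically offline CPUs) as fatal; such CPUs are tagged with pkg = core = -1 and skipped when counting packages. A userspace sketch of that counting step on invented data follows; it spells the final guard as pkg != -1, which appears to be the intent of the diff's `if (!cpu_top->core_info[0].pkg == -1)` (that expression parses as `(!pkg) == -1`).

#include <stdio.h>

struct core_info {
	int pkg;
	int core;
};

static int count_pkgs(const struct core_info *ci, int n)
{
	int pkgs = 0;
	int last_pkg = ci[0].pkg;

	for (int i = 1; i < n; i++) {
		if (ci[i].pkg != last_pkg && ci[i].pkg != -1) {
			last_pkg = ci[i].pkg;
			pkgs++;
		}
	}
	/* count the first entry's package only if it was readable */
	if (ci[0].pkg != -1)
		pkgs++;
	return pkgs;
}

int main(void)
{
	/* sorted by pkg, offline CPUs flagged with -1 */
	struct core_info ci[] = {
		{ -1, -1 }, { 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 },
	};

	printf("packages: %d\n", count_pkgs(ci, 5));
	return 0;
}
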
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
index c4bae9203a69..05f953f0f0a0 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
@@ -143,6 +143,9 @@ void print_results(int topology_depth, int cpu)
 	/* Be careful CPUs may got resorted for pkg value do not just use cpu */
 	if (!bitmask_isbitset(cpus_chosen, cpu_top.core_info[cpu].cpu))
 		return;
+	if (!cpu_top.core_info[cpu].is_online &&
+	    cpu_top.core_info[cpu].pkg == -1)
+		return;
 
 	if (topology_depth > 2)
 		printf("%4d|", cpu_top.core_info[cpu].pkg);
@@ -191,7 +194,8 @@ void print_results(int topology_depth, int cpu)
 	 * It's up to the monitor plug-in to check .is_online, this one
 	 * is just for additional info.
 	 */
-	if (!cpu_top.core_info[cpu].is_online) {
+	if (!cpu_top.core_info[cpu].is_online &&
+	    cpu_top.core_info[cpu].pkg != -1) {
 		printf(_(" *is offline\n"));
 		return;
 	} else
@@ -388,6 +392,9 @@ int cmd_monitor(int argc, char **argv)
 		return EXIT_FAILURE;
 	}
 
+	if (!cpu_top.core_info[0].is_online)
+		printf("WARNING: at least one cpu is offline\n");
+
 	/* Default is: monitor all CPUs */
 	if (bitmask_isallclear(cpus_chosen))
 		bitmask_setall(cpus_chosen);
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 48c6e1ac6827..b9d3a32cbc04 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -137,6 +137,8 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
 void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	bool phys_active;
+	int ret;
 
 	/*
 	 * We're about to run this vcpu again, so there is no need to
@@ -151,6 +153,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 	 */
 	if (kvm_timer_should_fire(vcpu))
 		kvm_timer_inject_irq(vcpu);
+
+	/*
+	 * We keep track of whether the edge-triggered interrupt has been
+	 * signalled to the vgic/guest, and if so, we mask the interrupt and
+	 * the physical distributor to prevent the timer from raising a
+	 * physical interrupt whenever we run a guest, preventing forward
+	 * VCPU progress.
+	 */
+	if (kvm_vgic_get_phys_irq_active(timer->map))
+		phys_active = true;
+	else
+		phys_active = false;
+
+	ret = irq_set_irqchip_state(timer->map->irq,
+				    IRQCHIP_STATE_ACTIVE,
+				    phys_active);
+	WARN_ON(ret);
 }
 
 /**
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 6bd1c9bf7ae7..66c66165e712 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -531,6 +531,34 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm,
 	return false;
 }
 
+/*
+ * If a mapped interrupt's state has been modified by the guest such that it
+ * is no longer active or pending, without it have gone through the sync path,
+ * then the map->active field must be cleared so the interrupt can be taken
+ * again.
+ */
+static void vgic_handle_clear_mapped_irq(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct list_head *root;
+	struct irq_phys_map_entry *entry;
+	struct irq_phys_map *map;
+
+	rcu_read_lock();
+
+	/* Check for PPIs */
+	root = &vgic_cpu->irq_phys_map_list;
+	list_for_each_entry_rcu(entry, root, entry) {
+		map = &entry->map;
+
+		if (!vgic_dist_irq_is_pending(vcpu, map->virt_irq) &&
+		    !vgic_irq_is_active(vcpu, map->virt_irq))
+			map->active = false;
+	}
+
+	rcu_read_unlock();
+}
+
 bool vgic_handle_clear_pending_reg(struct kvm *kvm,
 				   struct kvm_exit_mmio *mmio,
 				   phys_addr_t offset, int vcpu_id)
@@ -561,6 +589,7 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm,
 					      vcpu_id, offset);
 		vgic_reg_access(mmio, reg, offset, mode);
 
+		vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
 		vgic_update_state(kvm);
 		return true;
 	}
@@ -598,6 +627,7 @@ bool vgic_handle_clear_active_reg(struct kvm *kvm,
 			  ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
 
 	if (mmio->is_write) {
+		vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
 		vgic_update_state(kvm);
 		return true;
 	}
@@ -982,6 +1012,12 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
 	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
 	pend_shared = vcpu->arch.vgic_cpu.pending_shared;
 
+	if (!dist->enabled) {
+		bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
+		bitmap_zero(pend_shared, nr_shared);
+		return 0;
+	}
+
 	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
 	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
 	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
@@ -1009,11 +1045,6 @@ void vgic_update_state(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;
 	int c;
 
-	if (!dist->enabled) {
-		set_bit(0, dist->irq_pending_on_cpu);
-		return;
-	}
-
 	kvm_for_each_vcpu(c, vcpu, kvm) {
 		if (compute_pending_for_cpu(vcpu))
 			set_bit(c, dist->irq_pending_on_cpu);
@@ -1092,6 +1123,15 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
 
+	/*
+	 * We must transfer the pending state back to the distributor before
+	 * retiring the LR, otherwise we may loose edge-triggered interrupts.
+	 */
+	if (vlr.state & LR_STATE_PENDING) {
+		vgic_dist_irq_set_pending(vcpu, irq);
+		vlr.hwirq = 0;
+	}
+
 	vlr.state = 0;
 	vgic_set_lr(vcpu, lr_nr, vlr);
 	clear_bit(lr_nr, vgic_cpu->lr_used);
@@ -1132,7 +1172,8 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
 		kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
 		vgic_irq_clear_active(vcpu, irq);
 		vgic_update_state(vcpu->kvm);
-	} else if (vgic_dist_irq_is_pending(vcpu, irq)) {
+	} else {
+		WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
 		vlr.state |= LR_STATE_PENDING;
 		kvm_debug("Set pending: 0x%x\n", vlr.state);
 	}
@@ -1240,7 +1281,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	unsigned long *pa_percpu, *pa_shared;
-	int i, vcpu_id, lr, ret;
+	int i, vcpu_id;
 	int overflow = 0;
 	int nr_shared = vgic_nr_shared_irqs(dist);
 
@@ -1295,31 +1336,6 @@ epilog:
 	 */
 	clear_bit(vcpu_id, dist->irq_pending_on_cpu);
 	}
-
-	for (lr = 0; lr < vgic->nr_lr; lr++) {
-		struct vgic_lr vlr;
-
-		if (!test_bit(lr, vgic_cpu->lr_used))
-			continue;
-
-		vlr = vgic_get_lr(vcpu, lr);
-
-		/*
-		 * If we have a mapping, and the virtual interrupt is
-		 * presented to the guest (as pending or active), then we must
-		 * set the state to active in the physical world. See
-		 * Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt.
-		 */
-		if (vlr.state & LR_HW) {
-			struct irq_phys_map *map;
-			map = vgic_irq_map_search(vcpu, vlr.irq);
-
-			ret = irq_set_irqchip_state(map->irq,
-						    IRQCHIP_STATE_ACTIVE,
-						    true);
-			WARN_ON(ret);
-		}
-	}
 }
 
 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
@@ -1421,7 +1437,7 @@ static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
 		return 0;
 
 	map = vgic_irq_map_search(vcpu, vlr.irq);
-	BUG_ON(!map || !map->active);
+	BUG_ON(!map);
 
 	ret = irq_get_irqchip_state(map->irq,
 				    IRQCHIP_STATE_ACTIVE,
@@ -1429,13 +1445,8 @@ static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
 
 	WARN_ON(ret);
 
-	if (map->active) {
-		ret = irq_set_irqchip_state(map->irq,
-					    IRQCHIP_STATE_ACTIVE,
-					    false);
-		WARN_ON(ret);
+	if (map->active)
 		return 0;
-	}
 
 	return 1;
 }
@@ -1607,8 +1618,12 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 	} else {
 		if (level_triggered) {
 			vgic_dist_irq_clear_level(vcpu, irq_num);
-			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
+			if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
 				vgic_dist_irq_clear_pending(vcpu, irq_num);
+				vgic_cpu_irq_clear(vcpu, irq_num);
+				if (!compute_pending_for_cpu(vcpu))
+					clear_bit(cpuid, dist->irq_pending_on_cpu);
+			}
 		}
 
 		ret = false;