author    Ingo Molnar <mingo@kernel.org>    2018-05-31 06:27:56 -0400
committer Ingo Molnar <mingo@kernel.org>    2018-05-31 06:27:56 -0400
commit    c52b5c5f96b217854a953689f65234f7448c7f47 (patch)
tree      cb9e41e3c25965e550b28f5aa5467cce5983125a
parent    10b1105004fbd81058383537b67df35cc188ab62 (diff)
parent    786b71f5b754273ccef6d9462e52062b3e1f9877 (diff)
Merge branch 'linus' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  Documentation/networking/ppp_generic.txt | 6
-rw-r--r--  MAINTAINERS | 16
-rw-r--r--  Makefile | 11
-rw-r--r--  arch/arm/boot/dts/sun4i-a10.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/sun8i-h3-orangepi-one.dts | 1
-rw-r--r--  arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts | 2
-rw-r--r--  arch/arm/mach-ep93xx/core.c | 2
-rw-r--r--  arch/arm/mach-ixp4xx/avila-setup.c | 2
-rw-r--r--  arch/arm/mach-ixp4xx/dsmg600-setup.c | 2
-rw-r--r--  arch/arm/mach-ixp4xx/fsg-setup.c | 2
-rw-r--r--  arch/arm/mach-ixp4xx/ixdp425-setup.c | 2
-rw-r--r--  arch/arm/mach-ixp4xx/nas100d-setup.c | 2
-rw-r--r--  arch/arm/mach-ixp4xx/nslu2-setup.c | 2
-rw-r--r--  arch/arm/mach-pxa/palmz72.c | 2
-rw-r--r--  arch/arm/mach-pxa/viper.c | 4
-rw-r--r--  arch/arm/mach-sa1100/simpad.c | 2
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts | 1
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h | 24
-rw-r--r--  arch/arm64/kernel/arm64ksyms.c | 8
-rw-r--r--  arch/arm64/lib/tishift.S | 15
-rw-r--r--  arch/arm64/mm/fault.c | 51
-rw-r--r--  arch/arm64/mm/mmu.c | 16
-rw-r--r--  arch/nds32/Kconfig | 7
-rw-r--r--  arch/nds32/Kconfig.cpu | 5
-rw-r--r--  arch/nds32/Makefile | 7
-rw-r--r--  arch/nds32/include/asm/Kbuild | 2
-rw-r--r--  arch/nds32/include/asm/bitfield.h | 3
-rw-r--r--  arch/nds32/include/asm/cacheflush.h | 2
-rw-r--r--  arch/nds32/include/asm/io.h | 2
-rw-r--r--  arch/nds32/include/asm/page.h | 3
-rw-r--r--  arch/nds32/include/asm/pgtable.h | 1
-rw-r--r--  arch/nds32/kernel/ex-entry.S | 2
-rw-r--r--  arch/nds32/kernel/head.S | 28
-rw-r--r--  arch/nds32/kernel/setup.c | 3
-rw-r--r--  arch/nds32/kernel/stacktrace.c | 2
-rw-r--r--  arch/nds32/kernel/vdso.c | 10
-rw-r--r--  arch/nds32/lib/copy_page.S | 3
-rw-r--r--  arch/nds32/mm/alignment.c | 9
-rw-r--r--  arch/nds32/mm/cacheflush.c | 74
-rw-r--r--  arch/nds32/mm/init.c | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 1
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power.S | 6
-rw-r--r--  arch/powerpc/kernel/dt_cpu_ftrs.c | 1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c | 6
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 1
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 97
-rw-r--r--  arch/powerpc/kvm/book3s_xive_template.c | 108
-rw-r--r--  arch/s390/kvm/vsie.c | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 22
-rw-r--r--  arch/x86/kvm/cpuid.c | 9
-rw-r--r--  arch/x86/kvm/hyperv.c | 19
-rw-r--r--  arch/x86/kvm/lapic.c | 16
-rw-r--r--  arch/x86/kvm/x86.c | 17
-rw-r--r--  drivers/base/node.c | 5
-rw-r--r--  drivers/bcma/driver_mips.c | 2
-rw-r--r--  drivers/firmware/qcom_scm-32.c | 8
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_lvds.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 48
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.h | 25
-rw-r--r--  drivers/isdn/hardware/eicon/diva.c | 22
-rw-r--r--  drivers/isdn/hardware/eicon/diva.h | 5
-rw-r--r--  drivers/isdn/hardware/eicon/divasmain.c | 18
-rw-r--r--  drivers/mmc/core/block.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-iproc.c | 33
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 10
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 14
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/intf.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 2
-rw-r--r--  drivers/net/phy/bcm-cygnus.c | 6
-rw-r--r--  drivers/net/phy/bcm-phy-lib.c | 2
-rw-r--r--  drivers/net/phy/bcm-phy-lib.h | 7
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 4
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 27
-rw-r--r--  drivers/net/tun.c | 19
-rw-r--r--  drivers/net/virtio_net.c | 21
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 4
-rw-r--r--  drivers/ssb/Kconfig | 4
-rw-r--r--  drivers/vhost/vhost.c | 3
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 11
-rw-r--r--  fs/seq_file.c | 5
-rw-r--r--  include/linux/bpf_verifier.h | 2
-rw-r--r--  include/linux/gfp.h | 2
-rw-r--r--  include/linux/node.h | 8
-rw-r--r--  include/net/sctp/sctp.h | 2
-rw-r--r--  include/trace/events/sched.h | 4
-rw-r--r--  include/uapi/linux/nl80211.h | 2
-rw-r--r--  include/uapi/linux/ppp-ioctl.h | 2
-rw-r--r--  init/main.c | 1
-rw-r--r--  ipc/shm.c | 19
-rw-r--r--  kernel/bpf/verifier.c | 86
-rw-r--r--  kernel/kthread.c | 6
-rw-r--r--  kernel/sched/topology.c | 2
-rw-r--r--  kernel/sys.c | 5
-rw-r--r--  lib/radix-tree.c | 4
-rw-r--r--  mm/kasan/kasan.c | 66
-rw-r--r--  mm/memory_hotplug.c | 2
-rw-r--r--  mm/page_alloc.c | 16
-rw-r--r--  mm/swapfile.c | 7
-rw-r--r--  net/batman-adv/multicast.c | 2
-rw-r--r--  net/batman-adv/translation-table.c | 84
-rw-r--r--  net/dccp/proto.c | 2
-rw-r--r--  net/ipv4/fib_frontend.c | 1
-rw-r--r--  net/ipv4/ip_sockglue.c | 2
-rw-r--r--  net/ipv4/ipmr_base.c | 5
-rw-r--r--  net/mac80211/mesh_plink.c | 8
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/sched/cls_api.c | 2
-rw-r--r--  net/sctp/ipv6.c | 2
-rw-r--r--  net/sctp/protocol.c | 2
-rw-r--r--  net/sctp/socket.c | 51
-rw-r--r--  net/wireless/nl80211.c | 3
-rw-r--r--  net/wireless/reg.c | 3
-rwxr-xr-x  scripts/checkpatch.pl | 2
-rw-r--r--  sound/core/timer.c | 4
-rw-r--r--  sound/pci/hda/hda_local.h | 6
-rw-r--r--  tools/testing/radix-tree/idr-test.c | 7
-rw-r--r--  tools/testing/selftests/bpf/config | 2
-rw-r--r--  tools/testing/selftests/net/config | 5
-rw-r--r--  tools/testing/selftests/net/reuseport_bpf_numa.c | 4
130 files changed, 1048 insertions, 481 deletions
diff --git a/Documentation/networking/ppp_generic.txt b/Documentation/networking/ppp_generic.txt
index 091d20273dcb..61daf4b39600 100644
--- a/Documentation/networking/ppp_generic.txt
+++ b/Documentation/networking/ppp_generic.txt
@@ -300,12 +300,6 @@ unattached instance are:
 The ioctl calls available on an instance of /dev/ppp attached to a
 channel are:
 
-* PPPIOCDETACH detaches the instance from the channel. This ioctl is
-  deprecated since the same effect can be achieved by closing the
-  instance. In order to prevent possible races this ioctl will fail
-  with an EINVAL error if more than one file descriptor refers to this
-  instance (i.e. as a result of dup(), dup2() or fork()).
-
 * PPPIOCCONNECT connects this channel to a PPP interface. The
   argument should point to an int containing the interface unit
   number. It will return an EINVAL error if the channel is already
diff --git a/MAINTAINERS b/MAINTAINERS
index a9ca122957e9..ca4afd68530c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2332,7 +2332,7 @@ F: drivers/gpio/gpio-ath79.c
 F:	Documentation/devicetree/bindings/gpio/gpio-ath79.txt
 
 ATHEROS ATH GENERIC UTILITIES
-M:	"Luis R. Rodriguez" <mcgrof@do-not-panic.com>
+M:	Kalle Valo <kvalo@codeaurora.org>
 L:	linux-wireless@vger.kernel.org
 S:	Supported
 F:	drivers/net/wireless/ath/*
@@ -2347,7 +2347,7 @@ S: Maintained
 F:	drivers/net/wireless/ath/ath5k/
 
 ATHEROS ATH6KL WIRELESS DRIVER
-M:	Kalle Valo <kvalo@qca.qualcomm.com>
+M:	Kalle Valo <kvalo@codeaurora.org>
 L:	linux-wireless@vger.kernel.org
 W:	http://wireless.kernel.org/en/users/Drivers/ath6kl
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@ -6503,9 +6503,15 @@ F: Documentation/networking/hinic.txt
 F:	drivers/net/ethernet/huawei/hinic/
 
 HUGETLB FILESYSTEM
-M:	Nadia Yvette Chambers <nyc@holomorphy.com>
+M:	Mike Kravetz <mike.kravetz@oracle.com>
+L:	linux-mm@kvack.org
 S:	Maintained
 F:	fs/hugetlbfs/
+F:	mm/hugetlb.c
+F:	include/linux/hugetlb.h
+F:	Documentation/admin-guide/mm/hugetlbpage.rst
+F:	Documentation/vm/hugetlbfs_reserv.rst
+F:	Documentation/ABI/testing/sysfs-kernel-mm-hugepages
 
 HVA ST MEDIA DRIVER
 M:	Jean-Christophe Trotin <jean-christophe.trotin@st.com>
@@ -11626,7 +11632,7 @@ S: Maintained
 F:	drivers/media/tuners/qt1010*
 
 QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
-M:	Kalle Valo <kvalo@qca.qualcomm.com>
+M:	Kalle Valo <kvalo@codeaurora.org>
 L:	ath10k@lists.infradead.org
 W:	http://wireless.kernel.org/en/users/Drivers/ath10k
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@ -11677,7 +11683,7 @@ S: Maintained
 F:	drivers/media/platform/qcom/venus/
 
 QUALCOMM WCN36XX WIRELESS DRIVER
-M:	Eugene Krasnikov <k.eugene.e@gmail.com>
+M:	Kalle Valo <kvalo@codeaurora.org>
 L:	wcn36xx@lists.infradead.org
 W:	http://wireless.kernel.org/en/users/Drivers/wcn36xx
 T:	git git://github.com/KrasnikovEugene/wcn36xx.git
diff --git a/Makefile b/Makefile
index ec6f45928fd4..56ba070dfa09 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
@@ -500,6 +500,9 @@ RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
 RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
 export RETPOLINE_CFLAGS
 
+KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+KBUILD_AFLAGS += $(call cc-option,-fno-PIE)
+
 # check for 'asm goto'
 ifeq ($(call shell-cached,$(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
   CC_HAVE_ASM_GOTO := 1
@@ -621,9 +624,9 @@ endif # $(dot-config)
 # Defaults to vmlinux, but the arch makefile usually adds further targets
 all: vmlinux
 
-KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
-KBUILD_AFLAGS += $(call cc-option,-fno-PIE)
-CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
+CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \
+	$(call cc-option,-fno-tree-loop-im) \
+	$(call cc-disable-warning,maybe-uninitialized,)
 export CFLAGS_GCOV CFLAGS_KCOV
 
 # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 77e8436beed4..3a1c6b45c9a1 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -76,7 +76,7 @@
 		allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
 		clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_HDMI0>,
 			 <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
-			 <&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
+			 <&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>,
 			 <&ccu CLK_TCON0_CH1>, <&ccu CLK_HDMI>,
 			 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
 		status = "disabled";
@@ -88,7 +88,7 @@
 		allwinner,pipeline = "de_fe0-de_be0-lcd0";
 		clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_DE_BE0>,
 			 <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_DE_BE0>,
-			 <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_TCON0_CH0>,
+			 <&ccu CLK_DE_FE0>, <&ccu CLK_TCON0_CH0>,
 			 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
 		status = "disabled";
 	};
@@ -99,7 +99,7 @@
 		allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
 		clocks = <&ccu CLK_AHB_TVE0>, <&ccu CLK_AHB_LCD0>,
 			 <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
-			 <&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
+			 <&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>,
 			 <&ccu CLK_TCON0_CH1>, <&ccu CLK_DRAM_TVE0>,
 			 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
 		status = "disabled";
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
index 3328fe583c9b..232f124ce62c 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
@@ -117,6 +117,7 @@
 	phy-handle = <&int_mii_phy>;
 	phy-mode = "mii";
 	allwinner,leds-active-low;
+	status = "okay";
 };
 
 &hdmi {
diff --git a/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
index d1311098ea45..ad173605b1b8 100644
--- a/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
+++ b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
@@ -51,7 +51,7 @@
 
 	leds {
 		/* The LEDs use PG0~2 pins, which conflict with MMC1 */
-		status = "disbaled";
+		status = "disabled";
 	};
 };
 
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index e70feec6fad5..0581ffbedddd 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -323,7 +323,7 @@ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr)
 
 /* All EP93xx devices use the same two GPIO pins for I2C bit-banging */
 static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		/* Use local offsets on gpiochip/port "G" */
 		GPIO_LOOKUP_IDX("G", 1, NULL, 0,
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c
index 77def6169f50..44cbbce6bda6 100644
--- a/arch/arm/mach-ixp4xx/avila-setup.c
+++ b/arch/arm/mach-ixp4xx/avila-setup.c
@@ -51,7 +51,7 @@ static struct platform_device avila_flash = {
 };
 
 static struct gpiod_lookup_table avila_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", AVILA_SDA_PIN,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index 0f5c99941a7d..397190f3a8da 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -70,7 +70,7 @@ static struct platform_device dsmg600_flash = {
 };
 
 static struct gpiod_lookup_table dsmg600_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", DSMG600_SDA_PIN,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c
index 033f79b35d51..f0a152e365b1 100644
--- a/arch/arm/mach-ixp4xx/fsg-setup.c
+++ b/arch/arm/mach-ixp4xx/fsg-setup.c
@@ -56,7 +56,7 @@ static struct platform_device fsg_flash = {
 };
 
 static struct gpiod_lookup_table fsg_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", FSG_SDA_PIN,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index b168e2fbdbeb..3ec829d52cdd 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -124,7 +124,7 @@ static struct platform_device ixdp425_flash_nand = {
 #endif /* CONFIG_MTD_NAND_PLATFORM */
 
 static struct gpiod_lookup_table ixdp425_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", IXDP425_SDA_PIN,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index 76dfff03cb71..4138d6aa4c52 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -102,7 +102,7 @@ static struct platform_device nas100d_leds = {
 };
 
 static struct gpiod_lookup_table nas100d_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NAS100D_SDA_PIN,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index 91da63a7d7b5..341b263482ef 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -70,7 +70,7 @@ static struct platform_device nslu2_flash = {
 };
 
 static struct gpiod_lookup_table nslu2_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NSLU2_SDA_PIN,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index 5877e547cecd..0adb1bd6208e 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -322,7 +322,7 @@ static struct soc_camera_link palmz72_iclink = {
 };
 
 static struct gpiod_lookup_table palmz72_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio-pxa", 118, NULL, 0,
 				GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 90d0f277de55..207dcc2e94e7 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -460,7 +460,7 @@ static struct platform_device smc91x_device = {
 
 /* i2c */
 static struct gpiod_lookup_table viper_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.1",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio-pxa", VIPER_RTC_I2C_SDA_GPIO,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
@@ -789,7 +789,7 @@ static int __init viper_tpm_setup(char *str)
789__setup("tpm=", viper_tpm_setup); 789__setup("tpm=", viper_tpm_setup);
790 790
791struct gpiod_lookup_table viper_tpm_i2c_gpiod_table = { 791struct gpiod_lookup_table viper_tpm_i2c_gpiod_table = {
792 .dev_id = "i2c-gpio", 792 .dev_id = "i2c-gpio.2",
793 .table = { 793 .table = {
794 GPIO_LOOKUP_IDX("gpio-pxa", VIPER_TPM_I2C_SDA_GPIO, 794 GPIO_LOOKUP_IDX("gpio-pxa", VIPER_TPM_I2C_SDA_GPIO,
795 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 795 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index ace010479eb6..f45aed2519ba 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -327,7 +327,7 @@ static struct platform_device simpad_gpio_leds = {
  * i2c
  */
 static struct gpiod_lookup_table simpad_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio", 21, NULL, 0,
 				GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 724a0d3b7683..edb4ee0b8896 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -299,7 +299,6 @@
 	/* GPIO blocks 16 thru 19 do not appear to be routed to pins */
 
 	dwmmc_0: dwmmc0@f723d000 {
-		max-frequency = <150000000>;
 		cap-mmc-highspeed;
 		mmc-hs200-1_8v;
 		non-removable;
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 9ef0797380cb..f9b0b09153e0 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v)
 	/* LSE atomics */
 	"	mvn	%w[i], %w[i]\n"
 	"	stclr	%w[i], %[v]")
-	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: [i] "+&r" (w0), [v] "+Q" (v->counter)
 	: "r" (x1)
 	: __LL_SC_CLOBBERS);
 }
@@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \
 	/* LSE atomics */					\
 	"	mvn	%w[i], %w[i]\n"				\
 	"	ldclr" #mb "	%w[i], %w[i], %[v]")		\
-	: [i] "+r" (w0), [v] "+Q" (v->counter)			\
+	: [i] "+&r" (w0), [v] "+Q" (v->counter)			\
 	: "r" (x1)						\
 	: __LL_SC_CLOBBERS, ##cl);				\
 								\
@@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 	/* LSE atomics */
 	"	neg	%w[i], %w[i]\n"
 	"	stadd	%w[i], %[v]")
-	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: [i] "+&r" (w0), [v] "+Q" (v->counter)
 	: "r" (x1)
 	: __LL_SC_CLOBBERS);
 }
@@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
180 " neg %w[i], %w[i]\n" \ 180 " neg %w[i], %w[i]\n" \
181 " ldadd" #mb " %w[i], w30, %[v]\n" \ 181 " ldadd" #mb " %w[i], w30, %[v]\n" \
182 " add %w[i], %w[i], w30") \ 182 " add %w[i], %w[i], w30") \
183 : [i] "+r" (w0), [v] "+Q" (v->counter) \ 183 : [i] "+&r" (w0), [v] "+Q" (v->counter) \
184 : "r" (x1) \ 184 : "r" (x1) \
185 : __LL_SC_CLOBBERS , ##cl); \ 185 : __LL_SC_CLOBBERS , ##cl); \
186 \ 186 \
@@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
 	/* LSE atomics */					\
 	"	neg	%w[i], %w[i]\n"				\
 	"	ldadd" #mb "	%w[i], %w[i], %[v]")		\
-	: [i] "+r" (w0), [v] "+Q" (v->counter)			\
+	: [i] "+&r" (w0), [v] "+Q" (v->counter)			\
 	: "r" (x1)						\
 	: __LL_SC_CLOBBERS, ##cl);				\
 								\
@@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
 	/* LSE atomics */
 	"	mvn	%[i], %[i]\n"
 	"	stclr	%[i], %[v]")
-	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: [i] "+&r" (x0), [v] "+Q" (v->counter)
 	: "r" (x1)
 	: __LL_SC_CLOBBERS);
 }
@@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
 	/* LSE atomics */					\
 	"	mvn	%[i], %[i]\n"				\
 	"	ldclr" #mb "	%[i], %[i], %[v]")		\
-	: [i] "+r" (x0), [v] "+Q" (v->counter)			\
+	: [i] "+&r" (x0), [v] "+Q" (v->counter)			\
 	: "r" (x1)						\
 	: __LL_SC_CLOBBERS, ##cl);				\
 								\
@@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
 	/* LSE atomics */
 	"	neg	%[i], %[i]\n"
 	"	stadd	%[i], %[v]")
-	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: [i] "+&r" (x0), [v] "+Q" (v->counter)
 	: "r" (x1)
 	: __LL_SC_CLOBBERS);
 }
@@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
377 " neg %[i], %[i]\n" \ 377 " neg %[i], %[i]\n" \
378 " ldadd" #mb " %[i], x30, %[v]\n" \ 378 " ldadd" #mb " %[i], x30, %[v]\n" \
379 " add %[i], %[i], x30") \ 379 " add %[i], %[i], x30") \
380 : [i] "+r" (x0), [v] "+Q" (v->counter) \ 380 : [i] "+&r" (x0), [v] "+Q" (v->counter) \
381 : "r" (x1) \ 381 : "r" (x1) \
382 : __LL_SC_CLOBBERS, ##cl); \ 382 : __LL_SC_CLOBBERS, ##cl); \
383 \ 383 \
@@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
 	/* LSE atomics */					\
 	"	neg	%[i], %[i]\n"				\
 	"	ldadd" #mb "	%[i], %[i], %[v]")		\
-	: [i] "+r" (x0), [v] "+Q" (v->counter)			\
+	: [i] "+&r" (x0), [v] "+Q" (v->counter)			\
 	: "r" (x1)						\
 	: __LL_SC_CLOBBERS, ##cl);				\
 								\
@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
435 " sub x30, x30, %[ret]\n" 435 " sub x30, x30, %[ret]\n"
436 " cbnz x30, 1b\n" 436 " cbnz x30, 1b\n"
437 "2:") 437 "2:")
438 : [ret] "+r" (x0), [v] "+Q" (v->counter) 438 : [ret] "+&r" (x0), [v] "+Q" (v->counter)
439 : 439 :
440 : __LL_SC_CLOBBERS, "cc", "memory"); 440 : __LL_SC_CLOBBERS, "cc", "memory");
441 441
@@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
516 " eor %[old1], %[old1], %[oldval1]\n" \ 516 " eor %[old1], %[old1], %[oldval1]\n" \
517 " eor %[old2], %[old2], %[oldval2]\n" \ 517 " eor %[old2], %[old2], %[oldval2]\n" \
518 " orr %[old1], %[old1], %[old2]") \ 518 " orr %[old1], %[old1], %[old2]") \
519 : [old1] "+r" (x0), [old2] "+r" (x1), \ 519 : [old1] "+&r" (x0), [old2] "+&r" (x1), \
520 [v] "+Q" (*(unsigned long *)ptr) \ 520 [v] "+Q" (*(unsigned long *)ptr) \
521 : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ 521 : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
522 [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \ 522 [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 66be504edb6c..d894a20b70b2 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -75,3 +75,11 @@ NOKPROBE_SYMBOL(_mcount);
 	/* arm-smccc */
 EXPORT_SYMBOL(__arm_smccc_smc);
 EXPORT_SYMBOL(__arm_smccc_hvc);
+
+	/* tishift.S */
+extern long long __ashlti3(long long a, int b);
+EXPORT_SYMBOL(__ashlti3);
+extern long long __ashrti3(long long a, int b);
+EXPORT_SYMBOL(__ashrti3);
+extern long long __lshrti3(long long a, int b);
+EXPORT_SYMBOL(__lshrti3);
diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S
index d3db9b2cd479..0fdff97794de 100644
--- a/arch/arm64/lib/tishift.S
+++ b/arch/arm64/lib/tishift.S
@@ -1,17 +1,6 @@
-/*
- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2017-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
  */
 
 #include <linux/linkage.h>
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 4165485e8b6e..2af3dd89bcdb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -293,6 +293,57 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
 static void __do_user_fault(struct siginfo *info, unsigned int esr)
 {
 	current->thread.fault_address = (unsigned long)info->si_addr;
+
+	/*
+	 * If the faulting address is in the kernel, we must sanitize the ESR.
+	 * From userspace's point of view, kernel-only mappings don't exist
+	 * at all, so we report them as level 0 translation faults.
+	 * (This is not quite the way that "no mapping there at all" behaves:
+	 * an alignment fault not caused by the memory type would take
+	 * precedence over translation fault for a real access to empty
+	 * space. Unfortunately we can't easily distinguish "alignment fault
+	 * not caused by memory type" from "alignment fault caused by memory
+	 * type", so we ignore this wrinkle and just return the translation
+	 * fault.)
+	 */
+	if (current->thread.fault_address >= TASK_SIZE) {
+		switch (ESR_ELx_EC(esr)) {
+		case ESR_ELx_EC_DABT_LOW:
+			/*
+			 * These bits provide only information about the
+			 * faulting instruction, which userspace knows already.
+			 * We explicitly clear bits which are architecturally
+			 * RES0 in case they are given meanings in future.
+			 * We always report the ESR as if the fault was taken
+			 * to EL1 and so ISV and the bits in ISS[23:14] are
+			 * clear. (In fact it always will be a fault to EL1.)
+			 */
+			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
+				ESR_ELx_CM | ESR_ELx_WNR;
+			esr |= ESR_ELx_FSC_FAULT;
+			break;
+		case ESR_ELx_EC_IABT_LOW:
+			/*
+			 * Claim a level 0 translation fault.
+			 * All other bits are architecturally RES0 for faults
+			 * reported with that DFSC value, so we clear them.
+			 */
+			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
+			esr |= ESR_ELx_FSC_FAULT;
+			break;
+		default:
+			/*
+			 * This should never happen (entry.S only brings us
+			 * into this code for insn and data aborts from a lower
+			 * exception level). Fail safe by not providing an ESR
+			 * context record at all.
+			 */
+			WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
+			esr = 0;
+			break;
+		}
+	}
+
 	current->thread.fault_code = esr;
 	arm64_force_sig_info(info, esr_to_fault_info(esr)->name, current);
 }
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 2dbb2c9f1ec1..493ff75670ff 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -933,13 +933,15 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
 {
 	pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
 					pgprot_val(mk_sect_prot(prot)));
+	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), sect_prot);
 
-	/* ioremap_page_range doesn't honour BBM */
-	if (pud_present(READ_ONCE(*pudp)))
+	/* Only allow permission changes for now */
+	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
+				   pud_val(new_pud)))
 		return 0;
 
 	BUG_ON(phys & ~PUD_MASK);
-	set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
+	set_pud(pudp, new_pud);
 	return 1;
 }
 
@@ -947,13 +949,15 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
 {
 	pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
 					pgprot_val(mk_sect_prot(prot)));
+	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), sect_prot);
 
-	/* ioremap_page_range doesn't honour BBM */
-	if (pmd_present(READ_ONCE(*pmdp)))
+	/* Only allow permission changes for now */
+	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
+				   pmd_val(new_pmd)))
 		return 0;
 
 	BUG_ON(phys & ~PMD_MASK);
-	set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
+	set_pmd(pmdp, new_pmd);
 	return 1;
 }
 
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 249f38d3388f..b7404f2dcf5b 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -9,6 +9,12 @@ config NDS32
 	select CLKSRC_MMIO
 	select CLONE_BACKWARDS
 	select COMMON_CLK
+	select GENERIC_ASHLDI3
+	select GENERIC_ASHRDI3
+	select GENERIC_LSHRDI3
+	select GENERIC_CMPDI2
+	select GENERIC_MULDI3
+	select GENERIC_UCMPDI2
 	select GENERIC_ATOMIC64
 	select GENERIC_CPU_DEVICES
 	select GENERIC_CLOCKEVENTS
@@ -82,6 +88,7 @@ endmenu
 
 menu "Kernel Features"
 source "kernel/Kconfig.preempt"
+source "kernel/Kconfig.freezer"
 source "mm/Kconfig"
 source "kernel/Kconfig.hz"
 endmenu
diff --git a/arch/nds32/Kconfig.cpu b/arch/nds32/Kconfig.cpu
index ba44cc539da9..b8c8984d1456 100644
--- a/arch/nds32/Kconfig.cpu
+++ b/arch/nds32/Kconfig.cpu
@@ -1,10 +1,11 @@
1comment "Processor Features" 1comment "Processor Features"
2 2
3config CPU_BIG_ENDIAN 3config CPU_BIG_ENDIAN
4 bool "Big endian" 4 def_bool !CPU_LITTLE_ENDIAN
5 5
6config CPU_LITTLE_ENDIAN 6config CPU_LITTLE_ENDIAN
7 def_bool !CPU_BIG_ENDIAN 7 bool "Little endian"
8 default y
8 9
9config HWZOL 10config HWZOL
10 bool "hardware zero overhead loop support" 11 bool "hardware zero overhead loop support"
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 91f933d5a962..513bb2e9baf9 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -23,9 +23,6 @@ export TEXTADDR
 # If we have a machine-specific directory, then include it in the build.
 core-y += arch/nds32/kernel/ arch/nds32/mm/
 libs-y += arch/nds32/lib/
-LIBGCC_PATH := \
-  $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
-libs-y += $(LIBGCC_PATH)
 
 ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""'
 BUILTIN_DTB := y
@@ -35,8 +32,12 @@ endif
 
 ifdef CONFIG_CPU_LITTLE_ENDIAN
 KBUILD_CFLAGS   += $(call cc-option, -EL)
+KBUILD_AFLAGS   += $(call cc-option, -EL)
+LDFLAGS         += $(call cc-option, -EL)
 else
 KBUILD_CFLAGS   += $(call cc-option, -EB)
+KBUILD_AFLAGS   += $(call cc-option, -EB)
+LDFLAGS         += $(call cc-option, -EB)
 endif
 
 boot := arch/nds32/boot
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
index 06bdf8167f5a..142e612aa639 100644
--- a/arch/nds32/include/asm/Kbuild
+++ b/arch/nds32/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += dma.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += export.h
 generic-y += fb.h
 generic-y += fcntl.h
 generic-y += ftrace.h
@@ -49,6 +50,7 @@ generic-y += switch_to.h
 generic-y += timex.h
 generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += xor.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
diff --git a/arch/nds32/include/asm/bitfield.h b/arch/nds32/include/asm/bitfield.h
index c73f71d67744..8e84fc385b94 100644
--- a/arch/nds32/include/asm/bitfield.h
+++ b/arch/nds32/include/asm/bitfield.h
@@ -336,7 +336,7 @@
 #define INT_MASK_mskIDIVZE	( 0x1 << INT_MASK_offIDIVZE )
 #define INT_MASK_mskDSSIM	( 0x1 << INT_MASK_offDSSIM )
 
-#define INT_MASK_INITAIAL_VAL	0x10003
+#define INT_MASK_INITAIAL_VAL	(INT_MASK_mskDSSIM|INT_MASK_mskIDIVZE)
 
 /******************************************************************************
  * ir15: INT_PEND (Interrupt Pending Register)
@@ -396,6 +396,7 @@
 #define MMU_CTL_D8KB		1
 #define MMU_CTL_UNA		( 0x1 << MMU_CTL_offUNA )
 
+#define MMU_CTL_CACHEABLE_NON	0
 #define MMU_CTL_CACHEABLE_WB	2
 #define MMU_CTL_CACHEABLE_WT	3
 
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
index 1240f148ec0f..10b48f0d8e85 100644
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -32,6 +32,8 @@ void flush_anon_page(struct vm_area_struct *vma,
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 void flush_kernel_dcache_page(struct page *page);
+void flush_kernel_vmap_range(void *addr, int size);
+void invalidate_kernel_vmap_range(void *addr, int size);
 void flush_icache_range(unsigned long start, unsigned long end);
 void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #define flush_dcache_mmap_lock(mapping)   xa_lock_irq(&(mapping)->i_pages)
diff --git a/arch/nds32/include/asm/io.h b/arch/nds32/include/asm/io.h
index 966e71b3c960..71cd226d6863 100644
--- a/arch/nds32/include/asm/io.h
+++ b/arch/nds32/include/asm/io.h
@@ -4,6 +4,8 @@
 #ifndef __ASM_NDS32_IO_H
 #define __ASM_NDS32_IO_H
 
+#include <linux/types.h>
+
 extern void iounmap(volatile void __iomem *addr);
 #define __raw_writeb __raw_writeb
 static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
diff --git a/arch/nds32/include/asm/page.h b/arch/nds32/include/asm/page.h
index e27365c097b6..947f0491c9a7 100644
--- a/arch/nds32/include/asm/page.h
+++ b/arch/nds32/include/asm/page.h
@@ -27,6 +27,9 @@ extern void copy_user_highpage(struct page *to, struct page *from,
 			unsigned long vaddr, struct vm_area_struct *vma);
 extern void clear_user_highpage(struct page *page, unsigned long vaddr);
 
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+		    struct page *to);
+void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
 #define clear_user_highpage	clear_user_highpage
 #else
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index 6783937edbeb..d3e19a55cf53 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -152,6 +152,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PAGE_CACHE_L1	__pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE)
 #define PAGE_MEMORY	__pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
 #define PAGE_KERNEL	__pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
+#define PAGE_SHARED	__pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D | _PAGE_CACHE_SHRD)
 #define PAGE_DEVICE	__pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV)
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/nds32/kernel/ex-entry.S b/arch/nds32/kernel/ex-entry.S
index a72e83d804f5..b8ae4e9a6b93 100644
--- a/arch/nds32/kernel/ex-entry.S
+++ b/arch/nds32/kernel/ex-entry.S
@@ -118,7 +118,7 @@ common_exception_handler:
 	/* interrupt */
 2:
 #ifdef CONFIG_TRACE_IRQFLAGS
-	jal	arch_trace_hardirqs_off
+	jal	trace_hardirqs_off
 #endif
 	move	$r0, $sp
 	sethi	$lp, hi20(ret_from_intr)
diff --git a/arch/nds32/kernel/head.S b/arch/nds32/kernel/head.S
index 71f57bd70f3b..c5fdae174ced 100644
--- a/arch/nds32/kernel/head.S
+++ b/arch/nds32/kernel/head.S
@@ -57,14 +57,32 @@ _nodtb:
 	isb
 	mtsr    $r4, $L1_PPTB       ! load page table pointer\n"
 
-/* set NTC0 cacheable/writeback, mutliple page size in use */
+#ifdef CONFIG_CPU_DCACHE_DISABLE
+	#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_NON
+#else
+	#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+		#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WT
+	#else
+		#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WB
+	#endif
+#endif
+
+/* set NTC cacheability, mutliple page size in use */
 	mfsr    $r3, $MMU_CTL
-	li      $r0, #~MMU_CTL_mskNTC0
-	and     $r3, $r3, $r0
+#if CONFIG_MEMORY_START >= 0xc0000000
+	ori     $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC3)
+#elif CONFIG_MEMORY_START >= 0x80000000
+	ori     $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC2)
+#elif CONFIG_MEMORY_START >= 0x40000000
+	ori     $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC1)
+#else
+	ori     $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC0)
+#endif
+
 #ifdef CONFIG_ANDES_PAGE_SIZE_4KB
-	ori     $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0))
+	ori     $r3, $r3, #(MMU_CTL_mskMPZIU)
 #else
-	ori     $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)|MMU_CTL_D8KB)
+	ori     $r3, $r3, #(MMU_CTL_mskMPZIU|MMU_CTL_D8KB)
 #endif
 #ifdef CONFIG_HW_SUPPORT_UNALIGNMENT_ACCESS
 	li      $r0, #MMU_CTL_UNA
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
index ba910e9e4ecb..2f5b2ccebe47 100644
--- a/arch/nds32/kernel/setup.c
+++ b/arch/nds32/kernel/setup.c
@@ -293,6 +293,9 @@ void __init setup_arch(char **cmdline_p)
 	/* paging_init() sets up the MMU and marks all pages as reserved */
 	paging_init();
 
+	/* invalidate all TLB entries because the new mapping is created */
+	__nds32__tlbop_flua();
+
 	/* use generic way to parse */
 	parse_early_param();
 
diff --git a/arch/nds32/kernel/stacktrace.c b/arch/nds32/kernel/stacktrace.c
index bc70113c0e84..8b231e910ea6 100644
--- a/arch/nds32/kernel/stacktrace.c
+++ b/arch/nds32/kernel/stacktrace.c
@@ -9,6 +9,7 @@ void save_stack_trace(struct stack_trace *trace)
 {
 	save_stack_trace_tsk(current, trace);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
 
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
@@ -45,3 +46,4 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 		fpn = (unsigned long *)fpp;
 	}
 }
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c
index f1198d7a5654..016f15891f6d 100644
--- a/arch/nds32/kernel/vdso.c
+++ b/arch/nds32/kernel/vdso.c
@@ -23,7 +23,7 @@
 #include <asm/vdso_timer_info.h>
 #include <asm/cache_info.h>
 extern struct cache_info L1_cache_info[2];
-extern char vdso_start, vdso_end;
+extern char vdso_start[], vdso_end[];
 static unsigned long vdso_pages __ro_after_init;
 static unsigned long timer_mapping_base;
 
@@ -66,16 +66,16 @@ static int __init vdso_init(void)
 	int i;
 	struct page **vdso_pagelist;
 
-	if (memcmp(&vdso_start, "\177ELF", 4)) {
+	if (memcmp(vdso_start, "\177ELF", 4)) {
 		pr_err("vDSO is not a valid ELF object!\n");
 		return -EINVAL;
 	}
 	/* Creat a timer io mapping to get clock cycles counter */
 	get_timer_node_info();
 
-	vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
+	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
 	pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
-		vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
+		vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
 
 	/* Allocate the vDSO pagelist */
 	vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL);
@@ -83,7 +83,7 @@ static int __init vdso_init(void)
 		return -ENOMEM;
 
 	for (i = 0; i < vdso_pages; i++)
-		vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
+		vdso_pagelist[i] = virt_to_page(vdso_start + i * PAGE_SIZE);
 	vdso_spec[1].pages = &vdso_pagelist[0];
 
 	return 0;
diff --git a/arch/nds32/lib/copy_page.S b/arch/nds32/lib/copy_page.S
index 4a2ff85f17ee..f8701ed161a8 100644
--- a/arch/nds32/lib/copy_page.S
+++ b/arch/nds32/lib/copy_page.S
@@ -2,6 +2,7 @@
 // Copyright (C) 2005-2017 Andes Technology Corporation
 
 #include <linux/linkage.h>
+#include <asm/export.h>
 #include <asm/page.h>
 
 	.text
@@ -16,6 +17,7 @@ ENTRY(copy_page)
 	popm	$r2, $r10
 	ret
 ENDPROC(copy_page)
+EXPORT_SYMBOL(copy_page)
 
 ENTRY(clear_page)
 	pushm	$r1, $r9
@@ -35,3 +37,4 @@ ENTRY(clear_page)
 	popm	$r1, $r9
 	ret
 ENDPROC(clear_page)
+EXPORT_SYMBOL(clear_page)
diff --git a/arch/nds32/mm/alignment.c b/arch/nds32/mm/alignment.c
index b96a01b10ca7..e1aed9dc692d 100644
--- a/arch/nds32/mm/alignment.c
+++ b/arch/nds32/mm/alignment.c
@@ -19,7 +19,7 @@
 #define RA(inst)	(((inst) >> 15) & 0x1FUL)
 #define RB(inst)	(((inst) >> 10) & 0x1FUL)
 #define SV(inst)	(((inst) >> 8) & 0x3UL)
-#define IMM(inst)	(((inst) >> 0) & 0x3FFFUL)
+#define IMM(inst)	(((inst) >> 0) & 0x7FFFUL)
 
 #define RA3(inst)	(((inst) >> 3) & 0x7UL)
 #define RT3(inst)	(((inst) >> 6) & 0x7UL)
@@ -28,6 +28,9 @@
 #define RA5(inst)	(((inst) >> 0) & 0x1FUL)
 #define RT4(inst)	(((inst) >> 5) & 0xFUL)
 
+#define GET_IMMSVAL(imm_value) \
+	(((imm_value >> 14) & 0x1) ? (imm_value - 0x8000) : imm_value)
+
 #define __get8_data(val,addr,err)	\
 	__asm__(			\
 	"1:	lbi.bi	%1, [%2], #1\n"	\
@@ -467,7 +470,7 @@ static inline int do_32(unsigned long inst, struct pt_regs *regs)
 	}
 
 	if (imm)
-		shift = IMM(inst) * len;
+		shift = GET_IMMSVAL(IMM(inst)) * len;
 	else
 		shift = *idx_to_addr(regs, RB(inst)) << SV(inst);
 
@@ -552,7 +555,7 @@ static struct ctl_table alignment_tbl[3] = {
 
 static struct ctl_table nds32_sysctl_table[2] = {
 	{
-	 .procname = "unaligned_acess",
+	 .procname = "unaligned_access",
 	 .mode = 0555,
 	 .child = alignment_tbl},
 	{}
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
index 6eb786a399a2..ce8fd34497bf 100644
--- a/arch/nds32/mm/cacheflush.c
+++ b/arch/nds32/mm/cacheflush.c
@@ -147,6 +147,25 @@ void flush_cache_vunmap(unsigned long start, unsigned long end)
 	cpu_icache_inval_all();
 }
 
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+		    struct page *to)
+{
+	cpu_dcache_wbinval_page((unsigned long)vaddr);
+	cpu_icache_inval_page((unsigned long)vaddr);
+	copy_page(vto, vfrom);
+	cpu_dcache_wbinval_page((unsigned long)vto);
+	cpu_icache_inval_page((unsigned long)vto);
+}
+
+void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
+{
+	cpu_dcache_wbinval_page((unsigned long)vaddr);
+	cpu_icache_inval_page((unsigned long)vaddr);
+	clear_page(addr);
+	cpu_dcache_wbinval_page((unsigned long)addr);
+	cpu_icache_inval_page((unsigned long)addr);
+}
+
 void copy_user_highpage(struct page *to, struct page *from,
 			unsigned long vaddr, struct vm_area_struct *vma)
 {
@@ -156,11 +175,9 @@ void copy_user_highpage(struct page *to, struct page *from,
 	pto = page_to_phys(to);
 	pfrom = page_to_phys(from);
 
+	local_irq_save(flags);
 	if (aliasing(vaddr, (unsigned long)kfrom))
 		cpu_dcache_wb_page((unsigned long)kfrom);
-	if (aliasing(vaddr, (unsigned long)kto))
-		cpu_dcache_inval_page((unsigned long)kto);
-	local_irq_save(flags);
 	vto = kremap0(vaddr, pto);
 	vfrom = kremap1(vaddr, pfrom);
 	copy_page((void *)vto, (void *)vfrom);
@@ -198,21 +215,25 @@ void flush_dcache_page(struct page *page)
 	if (mapping && !mapping_mapped(mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
 	else {
-		int i, pc;
-		unsigned long vto, kaddr, flags;
+		unsigned long kaddr, flags;
+
 		kaddr = (unsigned long)page_address(page);
-		cpu_dcache_wbinval_page(kaddr);
-		pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE;
 		local_irq_save(flags);
-		for (i = 0; i < pc; i++) {
-			vto =
-			    kremap0(kaddr + i * PAGE_SIZE, page_to_phys(page));
-			cpu_dcache_wbinval_page(vto);
-			kunmap01(vto);
+		cpu_dcache_wbinval_page(kaddr);
+		if (mapping) {
+			unsigned long vaddr, kto;
+
+			vaddr = page->index << PAGE_SHIFT;
+			if (aliasing(vaddr, kaddr)) {
+				kto = kremap0(vaddr, page_to_phys(page));
+				cpu_dcache_wbinval_page(kto);
+				kunmap01(kto);
+			}
 		}
 		local_irq_restore(flags);
 	}
 }
+EXPORT_SYMBOL(flush_dcache_page);
 
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, void *src, int len)
@@ -251,7 +272,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 void flush_anon_page(struct vm_area_struct *vma,
 		     struct page *page, unsigned long vaddr)
 {
-	unsigned long flags;
+	unsigned long kaddr, flags, ktmp;
 	if (!PageAnon(page))
 		return;
 
@@ -261,7 +282,12 @@ void flush_anon_page(struct vm_area_struct *vma,
 	local_irq_save(flags);
 	if (vma->vm_flags & VM_EXEC)
 		cpu_icache_inval_page(vaddr & PAGE_MASK);
-	cpu_dcache_wbinval_page((unsigned long)page_address(page));
+	kaddr = (unsigned long)page_address(page);
+	if (aliasing(vaddr, kaddr)) {
+		ktmp = kremap0(vaddr, page_to_phys(page));
+		cpu_dcache_wbinval_page(ktmp);
+		kunmap01(ktmp);
+	}
 	local_irq_restore(flags);
 }
 
@@ -272,6 +298,25 @@ void flush_kernel_dcache_page(struct page *page)
272 cpu_dcache_wbinval_page((unsigned long)page_address(page)); 298 cpu_dcache_wbinval_page((unsigned long)page_address(page));
273 local_irq_restore(flags); 299 local_irq_restore(flags);
274} 300}
301EXPORT_SYMBOL(flush_kernel_dcache_page);
302
303void flush_kernel_vmap_range(void *addr, int size)
304{
305 unsigned long flags;
306 local_irq_save(flags);
307 cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
308 local_irq_restore(flags);
309}
310EXPORT_SYMBOL(flush_kernel_vmap_range);
311
312void invalidate_kernel_vmap_range(void *addr, int size)
313{
314 unsigned long flags;
315 local_irq_save(flags);
316 cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
317 local_irq_restore(flags);
318}
319EXPORT_SYMBOL(invalidate_kernel_vmap_range);
275 320
276void flush_icache_range(unsigned long start, unsigned long end) 321void flush_icache_range(unsigned long start, unsigned long end)
277{ 322{
@@ -283,6 +328,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
283 cpu_cache_wbinval_range(start, end, 1); 328 cpu_cache_wbinval_range(start, end, 1);
284 local_irq_restore(flags); 329 local_irq_restore(flags);
285} 330}
331EXPORT_SYMBOL(flush_icache_range);
286 332
287void flush_icache_page(struct vm_area_struct *vma, struct page *page) 333void flush_icache_page(struct vm_area_struct *vma, struct page *page)
288{ 334{
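The nds32 hunks above all apply the same rule for a virtually indexed D-cache: flush through a temporary kernel mapping (kremap0/kunmap01) only when the user address and the kernel linear address land on different cache colours, i.e. when aliasing() is true. The short user-space sketch below illustrates that colour test; the 16 KiB way size and 4 KiB page size are assumptions chosen for the example, not values taken from the nds32 sources.

/*
 * Simplified, user-space illustration of the aliasing test used in the
 * hunks above.  The cache geometry is assumed for the example.
 */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE        4096UL
#define DCACHE_WAY_SIZE  (16 * 1024UL)           /* assumed way size */
#define ALIAS_MASK       (DCACHE_WAY_SIZE - 1UL) /* set-index bits */

/*
 * Two virtual mappings of the same physical page alias when their
 * page-colour bits (the set-index bits above the page offset) differ.
 */
static bool aliasing(unsigned long vaddr, unsigned long kaddr)
{
        return ((vaddr ^ kaddr) & ALIAS_MASK & ~(PAGE_SIZE - 1UL)) != 0;
}

int main(void)
{
        unsigned long user_va   = 0x00403000UL;  /* hypothetical user mapping */
        unsigned long kernel_va = 0xc0001000UL;  /* hypothetical kernel mapping */

        if (aliasing(user_va, kernel_va))
                printf("different colour: write back through a matching mapping\n");
        else
                printf("same colour: the kernel mapping already hits the same lines\n");
        return 0;
}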
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index 93ee0160720b..c713d2ad55dc 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -30,6 +30,7 @@ extern unsigned long phys_initrd_size;
30 * zero-initialized data and COW. 30 * zero-initialized data and COW.
31 */ 31 */
32struct page *empty_zero_page; 32struct page *empty_zero_page;
33EXPORT_SYMBOL(empty_zero_page);
33 34
34static void __init zone_sizes_init(void) 35static void __init zone_sizes_init(void)
35{ 36{
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4c02a7378d06..e7377b73cfec 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -96,6 +96,7 @@ struct kvmppc_vcore {
96 struct kvm_vcpu *runner; 96 struct kvm_vcpu *runner;
97 struct kvm *kvm; 97 struct kvm *kvm;
98 u64 tb_offset; /* guest timebase - host timebase */ 98 u64 tb_offset; /* guest timebase - host timebase */
99 u64 tb_offset_applied; /* timebase offset currently in force */
99 ulong lpcr; 100 ulong lpcr;
100 u32 arch_compat; 101 u32 arch_compat;
101 ulong pcr; 102 ulong pcr;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6bee65f3cfd3..373dc1d6ef44 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -562,6 +562,7 @@ int main(void)
562 OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads); 562 OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads);
563 OFFSET(VCORE_KVM, kvmppc_vcore, kvm); 563 OFFSET(VCORE_KVM, kvmppc_vcore, kvm);
564 OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset); 564 OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset);
565 OFFSET(VCORE_TB_OFFSET_APPL, kvmppc_vcore, tb_offset_applied);
565 OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr); 566 OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr);
566 OFFSET(VCORE_PCR, kvmppc_vcore, pcr); 567 OFFSET(VCORE_PCR, kvmppc_vcore, pcr);
567 OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes); 568 OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes);
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 3f30c994e931..458b928dbd84 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -28,6 +28,7 @@ _GLOBAL(__setup_cpu_power7)
28 beqlr 28 beqlr
29 li r0,0 29 li r0,0
30 mtspr SPRN_LPID,r0 30 mtspr SPRN_LPID,r0
31 mtspr SPRN_PCR,r0
31 mfspr r3,SPRN_LPCR 32 mfspr r3,SPRN_LPCR
32 li r4,(LPCR_LPES1 >> LPCR_LPES_SH) 33 li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
33 bl __init_LPCR_ISA206 34 bl __init_LPCR_ISA206
@@ -41,6 +42,7 @@ _GLOBAL(__restore_cpu_power7)
41 beqlr 42 beqlr
42 li r0,0 43 li r0,0
43 mtspr SPRN_LPID,r0 44 mtspr SPRN_LPID,r0
45 mtspr SPRN_PCR,r0
44 mfspr r3,SPRN_LPCR 46 mfspr r3,SPRN_LPCR
45 li r4,(LPCR_LPES1 >> LPCR_LPES_SH) 47 li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
46 bl __init_LPCR_ISA206 48 bl __init_LPCR_ISA206
@@ -57,6 +59,7 @@ _GLOBAL(__setup_cpu_power8)
57 beqlr 59 beqlr
58 li r0,0 60 li r0,0
59 mtspr SPRN_LPID,r0 61 mtspr SPRN_LPID,r0
62 mtspr SPRN_PCR,r0
60 mfspr r3,SPRN_LPCR 63 mfspr r3,SPRN_LPCR
61 ori r3, r3, LPCR_PECEDH 64 ori r3, r3, LPCR_PECEDH
62 li r4,0 /* LPES = 0 */ 65 li r4,0 /* LPES = 0 */
@@ -78,6 +81,7 @@ _GLOBAL(__restore_cpu_power8)
78 beqlr 81 beqlr
79 li r0,0 82 li r0,0
80 mtspr SPRN_LPID,r0 83 mtspr SPRN_LPID,r0
84 mtspr SPRN_PCR,r0
81 mfspr r3,SPRN_LPCR 85 mfspr r3,SPRN_LPCR
82 ori r3, r3, LPCR_PECEDH 86 ori r3, r3, LPCR_PECEDH
83 li r4,0 /* LPES = 0 */ 87 li r4,0 /* LPES = 0 */
@@ -99,6 +103,7 @@ _GLOBAL(__setup_cpu_power9)
99 mtspr SPRN_PSSCR,r0 103 mtspr SPRN_PSSCR,r0
100 mtspr SPRN_LPID,r0 104 mtspr SPRN_LPID,r0
101 mtspr SPRN_PID,r0 105 mtspr SPRN_PID,r0
106 mtspr SPRN_PCR,r0
102 mfspr r3,SPRN_LPCR 107 mfspr r3,SPRN_LPCR
103 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) 108 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
104 or r3, r3, r4 109 or r3, r3, r4
@@ -123,6 +128,7 @@ _GLOBAL(__restore_cpu_power9)
123 mtspr SPRN_PSSCR,r0 128 mtspr SPRN_PSSCR,r0
124 mtspr SPRN_LPID,r0 129 mtspr SPRN_LPID,r0
125 mtspr SPRN_PID,r0 130 mtspr SPRN_PID,r0
131 mtspr SPRN_PCR,r0
126 mfspr r3,SPRN_LPCR 132 mfspr r3,SPRN_LPCR
127 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) 133 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
128 or r3, r3, r4 134 or r3, r3, r4
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 8ab51f6ca03a..c904477abaf3 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -101,6 +101,7 @@ static void __restore_cpu_cpufeatures(void)
101 if (hv_mode) { 101 if (hv_mode) {
102 mtspr(SPRN_LPID, 0); 102 mtspr(SPRN_LPID, 0);
103 mtspr(SPRN_HFSCR, system_registers.hfscr); 103 mtspr(SPRN_HFSCR, system_registers.hfscr);
104 mtspr(SPRN_PCR, 0);
104 } 105 }
105 mtspr(SPRN_FSCR, system_registers.fscr); 106 mtspr(SPRN_FSCR, system_registers.fscr);
106 107
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index a57eafec4dc2..361f42c8c73e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -162,7 +162,7 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
162 if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) 162 if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG))
163 asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1) 163 asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
164 : : "r" (addr), "r" (kvm->arch.lpid) : "memory"); 164 : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
165 asm volatile("ptesync": : :"memory"); 165 asm volatile("eieio ; tlbsync ; ptesync": : :"memory");
166} 166}
167 167
168static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr) 168static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr)
@@ -173,7 +173,7 @@ static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr)
173 /* RIC=1 PRS=0 R=1 IS=2 */ 173 /* RIC=1 PRS=0 R=1 IS=2 */
174 asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1) 174 asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1)
175 : : "r" (rb), "r" (kvm->arch.lpid) : "memory"); 175 : : "r" (rb), "r" (kvm->arch.lpid) : "memory");
176 asm volatile("ptesync": : :"memory"); 176 asm volatile("eieio ; tlbsync ; ptesync": : :"memory");
177} 177}
178 178
179unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, 179unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
@@ -584,7 +584,7 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
584 584
585 ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); 585 ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
586 if (ptep && pte_present(*ptep)) { 586 if (ptep && pte_present(*ptep)) {
587 old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0, 587 old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0,
588 gpa, shift); 588 gpa, shift);
589 kvmppc_radix_tlbie_page(kvm, gpa, shift); 589 kvmppc_radix_tlbie_page(kvm, gpa, shift);
590 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) { 590 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 4d07fca5121c..9963f65c212b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2441,6 +2441,7 @@ static void init_vcore_to_run(struct kvmppc_vcore *vc)
2441 vc->in_guest = 0; 2441 vc->in_guest = 0;
2442 vc->napping_threads = 0; 2442 vc->napping_threads = 0;
2443 vc->conferring_threads = 0; 2443 vc->conferring_threads = 0;
2444 vc->tb_offset_applied = 0;
2444} 2445}
2445 2446
2446static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) 2447static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bd63fa8a08b5..07ca1b2a7966 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -692,6 +692,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
69222: ld r8,VCORE_TB_OFFSET(r5) 69222: ld r8,VCORE_TB_OFFSET(r5)
693 cmpdi r8,0 693 cmpdi r8,0
694 beq 37f 694 beq 37f
695 std r8, VCORE_TB_OFFSET_APPL(r5)
695 mftb r6 /* current host timebase */ 696 mftb r6 /* current host timebase */
696 add r8,r8,r6 697 add r8,r8,r6
697 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 698 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
@@ -940,18 +941,6 @@ FTR_SECTION_ELSE
940ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 941ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
9418: 9428:
942 943
943 /*
944 * Set the decrementer to the guest decrementer.
945 */
946 ld r8,VCPU_DEC_EXPIRES(r4)
947 /* r8 is a host timebase value here, convert to guest TB */
948 ld r5,HSTATE_KVM_VCORE(r13)
949 ld r6,VCORE_TB_OFFSET(r5)
950 add r8,r8,r6
951 mftb r7
952 subf r3,r7,r8
953 mtspr SPRN_DEC,r3
954
955 ld r5, VCPU_SPRG0(r4) 944 ld r5, VCPU_SPRG0(r4)
956 ld r6, VCPU_SPRG1(r4) 945 ld r6, VCPU_SPRG1(r4)
957 ld r7, VCPU_SPRG2(r4) 946 ld r7, VCPU_SPRG2(r4)
@@ -1005,6 +994,18 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
1005 mtspr SPRN_LPCR,r8 994 mtspr SPRN_LPCR,r8
1006 isync 995 isync
1007 996
997 /*
998 * Set the decrementer to the guest decrementer.
999 */
1000 ld r8,VCPU_DEC_EXPIRES(r4)
1001 /* r8 is a host timebase value here, convert to guest TB */
1002 ld r5,HSTATE_KVM_VCORE(r13)
1003 ld r6,VCORE_TB_OFFSET_APPL(r5)
1004 add r8,r8,r6
1005 mftb r7
1006 subf r3,r7,r8
1007 mtspr SPRN_DEC,r3
1008
1008 /* Check if HDEC expires soon */ 1009 /* Check if HDEC expires soon */
1009 mfspr r3, SPRN_HDEC 1010 mfspr r3, SPRN_HDEC
1010 EXTEND_HDEC(r3) 1011 EXTEND_HDEC(r3)
@@ -1597,8 +1598,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
1597 1598
1598guest_bypass: 1599guest_bypass:
1599 stw r12, STACK_SLOT_TRAP(r1) 1600 stw r12, STACK_SLOT_TRAP(r1)
1600 mr r3, r12 1601
1602 /* Save DEC */
1603 /* Do this before kvmhv_commence_exit so we know TB is guest TB */
1604 ld r3, HSTATE_KVM_VCORE(r13)
1605 mfspr r5,SPRN_DEC
1606 mftb r6
1607 /* On P9, if the guest has large decr enabled, don't sign extend */
1608BEGIN_FTR_SECTION
1609 ld r4, VCORE_LPCR(r3)
1610 andis. r4, r4, LPCR_LD@h
1611 bne 16f
1612END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1613 extsw r5,r5
161416: add r5,r5,r6
1615 /* r5 is a guest timebase value here, convert to host TB */
1616 ld r4,VCORE_TB_OFFSET_APPL(r3)
1617 subf r5,r4,r5
1618 std r5,VCPU_DEC_EXPIRES(r9)
1619
1601 /* Increment exit count, poke other threads to exit */ 1620 /* Increment exit count, poke other threads to exit */
1621 mr r3, r12
1602 bl kvmhv_commence_exit 1622 bl kvmhv_commence_exit
1603 nop 1623 nop
1604 ld r9, HSTATE_KVM_VCPU(r13) 1624 ld r9, HSTATE_KVM_VCPU(r13)
@@ -1639,23 +1659,6 @@ guest_bypass:
1639 mtspr SPRN_PURR,r3 1659 mtspr SPRN_PURR,r3
1640 mtspr SPRN_SPURR,r4 1660 mtspr SPRN_SPURR,r4
1641 1661
1642 /* Save DEC */
1643 ld r3, HSTATE_KVM_VCORE(r13)
1644 mfspr r5,SPRN_DEC
1645 mftb r6
1646 /* On P9, if the guest has large decr enabled, don't sign extend */
1647BEGIN_FTR_SECTION
1648 ld r4, VCORE_LPCR(r3)
1649 andis. r4, r4, LPCR_LD@h
1650 bne 16f
1651END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1652 extsw r5,r5
165316: add r5,r5,r6
1654 /* r5 is a guest timebase value here, convert to host TB */
1655 ld r4,VCORE_TB_OFFSET(r3)
1656 subf r5,r4,r5
1657 std r5,VCPU_DEC_EXPIRES(r9)
1658
1659BEGIN_FTR_SECTION 1662BEGIN_FTR_SECTION
1660 b 8f 1663 b 8f
1661END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 1664END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
@@ -1905,6 +1908,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1905 cmpwi cr2, r0, 0 1908 cmpwi cr2, r0, 0
1906 beq cr2, 4f 1909 beq cr2, 4f
1907 1910
1911 /*
1912 * Radix: do eieio; tlbsync; ptesync sequence in case we
1913 * interrupted the guest between a tlbie and a ptesync.
1914 */
1915 eieio
1916 tlbsync
1917 ptesync
1918
1908 /* Radix: Handle the case where the guest used an illegal PID */ 1919 /* Radix: Handle the case where the guest used an illegal PID */
1909 LOAD_REG_ADDR(r4, mmu_base_pid) 1920 LOAD_REG_ADDR(r4, mmu_base_pid)
1910 lwz r3, VCPU_GUEST_PID(r9) 1921 lwz r3, VCPU_GUEST_PID(r9)
@@ -2017,9 +2028,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2017 2028
201827: 202927:
2019 /* Subtract timebase offset from timebase */ 2030 /* Subtract timebase offset from timebase */
2020 ld r8,VCORE_TB_OFFSET(r5) 2031 ld r8, VCORE_TB_OFFSET_APPL(r5)
2021 cmpdi r8,0 2032 cmpdi r8,0
2022 beq 17f 2033 beq 17f
2034 li r0, 0
2035 std r0, VCORE_TB_OFFSET_APPL(r5)
2023 mftb r6 /* current guest timebase */ 2036 mftb r6 /* current guest timebase */
2024 subf r8,r8,r6 2037 subf r8,r8,r6
2025 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 2038 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
@@ -2700,7 +2713,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2700 add r3, r3, r5 2713 add r3, r3, r5
2701 ld r4, HSTATE_KVM_VCPU(r13) 2714 ld r4, HSTATE_KVM_VCPU(r13)
2702 ld r5, HSTATE_KVM_VCORE(r13) 2715 ld r5, HSTATE_KVM_VCORE(r13)
2703 ld r6, VCORE_TB_OFFSET(r5) 2716 ld r6, VCORE_TB_OFFSET_APPL(r5)
2704 subf r3, r6, r3 /* convert to host TB value */ 2717 subf r3, r6, r3 /* convert to host TB value */
2705 std r3, VCPU_DEC_EXPIRES(r4) 2718 std r3, VCPU_DEC_EXPIRES(r4)
2706 2719
@@ -2799,7 +2812,7 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
2799 /* Restore guest decrementer */ 2812 /* Restore guest decrementer */
2800 ld r3, VCPU_DEC_EXPIRES(r4) 2813 ld r3, VCPU_DEC_EXPIRES(r4)
2801 ld r5, HSTATE_KVM_VCORE(r13) 2814 ld r5, HSTATE_KVM_VCORE(r13)
2802 ld r6, VCORE_TB_OFFSET(r5) 2815 ld r6, VCORE_TB_OFFSET_APPL(r5)
2803 add r3, r3, r6 /* convert host TB to guest TB value */ 2816 add r3, r3, r6 /* convert host TB to guest TB value */
2804 mftb r7 2817 mftb r7
2805 subf r3, r7, r3 2818 subf r3, r7, r3
@@ -3606,12 +3619,9 @@ kvmppc_fix_pmao:
3606 */ 3619 */
3607kvmhv_start_timing: 3620kvmhv_start_timing:
3608 ld r5, HSTATE_KVM_VCORE(r13) 3621 ld r5, HSTATE_KVM_VCORE(r13)
3609 lbz r6, VCORE_IN_GUEST(r5) 3622 ld r6, VCORE_TB_OFFSET_APPL(r5)
3610 cmpwi r6, 0 3623 mftb r5
3611 beq 5f /* if in guest, need to */ 3624 subf r5, r6, r5 /* subtract current timebase offset */
3612 ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
36135: mftb r5
3614 subf r5, r6, r5
3615 std r3, VCPU_CUR_ACTIVITY(r4) 3625 std r3, VCPU_CUR_ACTIVITY(r4)
3616 std r5, VCPU_ACTIVITY_START(r4) 3626 std r5, VCPU_ACTIVITY_START(r4)
3617 blr 3627 blr
@@ -3622,15 +3632,12 @@ kvmhv_start_timing:
3622 */ 3632 */
3623kvmhv_accumulate_time: 3633kvmhv_accumulate_time:
3624 ld r5, HSTATE_KVM_VCORE(r13) 3634 ld r5, HSTATE_KVM_VCORE(r13)
3625 lbz r8, VCORE_IN_GUEST(r5) 3635 ld r8, VCORE_TB_OFFSET_APPL(r5)
3626 cmpwi r8, 0 3636 ld r5, VCPU_CUR_ACTIVITY(r4)
3627 beq 4f /* if in guest, need to */
3628 ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
36294: ld r5, VCPU_CUR_ACTIVITY(r4)
3630 ld r6, VCPU_ACTIVITY_START(r4) 3637 ld r6, VCPU_ACTIVITY_START(r4)
3631 std r3, VCPU_CUR_ACTIVITY(r4) 3638 std r3, VCPU_CUR_ACTIVITY(r4)
3632 mftb r7 3639 mftb r7
3633 subf r7, r8, r7 3640 subf r7, r8, r7 /* subtract current timebase offset */
3634 std r7, VCPU_ACTIVITY_START(r4) 3641 std r7, VCPU_ACTIVITY_START(r4)
3635 cmpdi r5, 0 3642 cmpdi r5, 0
3636 beqlr 3643 beqlr
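Throughout the rmhandlers changes, timebase conversions now use VCORE_TB_OFFSET_APPL, the offset actually in force on the hardware timebase, rather than the requested VCORE_TB_OFFSET. The stand-alone sketch below (invented numbers, plain C in place of the assembly) shows the intended bookkeeping: the applied offset is non-zero only between guest entry and exit, so a saved DEC expiry converted with it is always in host timebase units.

#include <stdio.h>
#include <stdint.h>

struct vcore {
        uint64_t tb_offset;          /* requested guest - host offset */
        uint64_t tb_offset_applied;  /* offset currently in force on the TB */
};

static uint64_t guest_to_host_tb(const struct vcore *vc, uint64_t guest_tb)
{
        return guest_tb - vc->tb_offset_applied;
}

static uint64_t host_to_guest_tb(const struct vcore *vc, uint64_t host_tb)
{
        return host_tb + vc->tb_offset_applied;
}

int main(void)
{
        struct vcore vc = { .tb_offset = 1000, .tb_offset_applied = 0 };
        uint64_t guest_dec_expiry = 500000;      /* arbitrary guest-TB value */

        /* Guest entry: the offset is added via TBU40, so it is now applied. */
        vc.tb_offset_applied = vc.tb_offset;

        uint64_t saved = guest_to_host_tb(&vc, guest_dec_expiry);
        printf("DEC expiry saved as host TB : %llu\n", (unsigned long long)saved);
        printf("converted back for next run : %llu\n",
               (unsigned long long)host_to_guest_tb(&vc, saved));

        /* Guest exit: the offset is removed from the TB and the field cleared,
         * so any conversion done outside the guest uses an offset of zero. */
        vc.tb_offset_applied = 0;
        return 0;
}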
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index c7a5deadd1cc..99c3620b40d9 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -11,6 +11,9 @@
11#define XGLUE(a,b) a##b 11#define XGLUE(a,b) a##b
12#define GLUE(a,b) XGLUE(a,b) 12#define GLUE(a,b) XGLUE(a,b)
13 13
14/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */
15#define XICS_DUMMY 1
16
14static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc) 17static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
15{ 18{
16 u8 cppr; 19 u8 cppr;
@@ -205,6 +208,10 @@ skip_ipi:
205 goto skip_ipi; 208 goto skip_ipi;
206 } 209 }
207 210
211 /* If it's the dummy interrupt, continue searching */
212 if (hirq == XICS_DUMMY)
213 goto skip_ipi;
214
208 /* If fetching, update queue pointers */ 215 /* If fetching, update queue pointers */
209 if (scan_type == scan_fetch) { 216 if (scan_type == scan_fetch) {
210 q->idx = idx; 217 q->idx = idx;
@@ -385,9 +392,76 @@ static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
385 __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING); 392 __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
386} 393}
387 394
395static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive,
396 struct kvmppc_xive_vcpu *xc)
397{
398 unsigned int prio;
399
400 /* For each priority that is now masked */
401 for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
402 struct xive_q *q = &xc->queues[prio];
403 struct kvmppc_xive_irq_state *state;
404 struct kvmppc_xive_src_block *sb;
405 u32 idx, toggle, entry, irq, hw_num;
406 struct xive_irq_data *xd;
407 __be32 *qpage;
408 u16 src;
409
410 idx = q->idx;
411 toggle = q->toggle;
412 qpage = READ_ONCE(q->qpage);
413 if (!qpage)
414 continue;
415
416 /* For each interrupt in the queue */
417 for (;;) {
418 entry = be32_to_cpup(qpage + idx);
419
420 /* No more ? */
421 if ((entry >> 31) == toggle)
422 break;
423 irq = entry & 0x7fffffff;
424
425 /* Skip dummies and IPIs */
426 if (irq == XICS_DUMMY || irq == XICS_IPI)
427 goto next;
428 sb = kvmppc_xive_find_source(xive, irq, &src);
429 if (!sb)
430 goto next;
431 state = &sb->irq_state[src];
432
433 /* Has it been rerouted ? */
434 if (xc->server_num == state->act_server)
435 goto next;
436
437 /*
 438			 * All right, it *has* been re-routed, kill it from
439 * the queue.
440 */
441 qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);
442
443 /* Find the HW interrupt */
444 kvmppc_xive_select_irq(state, &hw_num, &xd);
445
 446			/* If it's not an LSI, set PQ to 11 so the EOI will force a resend */
447 if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
448 GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11);
449
450 /* EOI the source */
451 GLUE(X_PFX,source_eoi)(hw_num, xd);
452
453 next:
454 idx = (idx + 1) & q->msk;
455 if (idx == 0)
456 toggle ^= 1;
457 }
458 }
459}
460
388X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr) 461X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
389{ 462{
390 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 463 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
464 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
391 u8 old_cppr; 465 u8 old_cppr;
392 466
393 pr_devel("H_CPPR(cppr=%ld)\n", cppr); 467 pr_devel("H_CPPR(cppr=%ld)\n", cppr);
@@ -407,14 +481,34 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
407 */ 481 */
408 smp_mb(); 482 smp_mb();
409 483
410 /* 484 if (cppr > old_cppr) {
411 * We are masking less, we need to look for pending things 485 /*
412 * to deliver and set VP pending bits accordingly to trigger 486 * We are masking less, we need to look for pending things
413 * a new interrupt otherwise we might miss MFRR changes for 487 * to deliver and set VP pending bits accordingly to trigger
414 * which we have optimized out sending an IPI signal. 488 * a new interrupt otherwise we might miss MFRR changes for
415 */ 489 * which we have optimized out sending an IPI signal.
416 if (cppr > old_cppr) 490 */
417 GLUE(X_PFX,push_pending_to_hw)(xc); 491 GLUE(X_PFX,push_pending_to_hw)(xc);
492 } else {
493 /*
494 * We are masking more, we need to check the queue for any
495 * interrupt that has been routed to another CPU, take
496 * it out (replace it with the dummy) and retrigger it.
497 *
498 * This is necessary since those interrupts may otherwise
499 * never be processed, at least not until this CPU restores
500 * its CPPR.
501 *
502 * This is in theory racy vs. HW adding new interrupts to
503 * the queue. In practice this works because the interesting
504 * cases are when the guest has done a set_xive() to move the
505 * interrupt away, which flushes the xive, followed by the
506 * target CPU doing a H_CPPR. So any new interrupt coming into
507 * the queue must still be routed to us and isn't a source
508 * of concern.
509 */
510 GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc);
511 }
418 512
419 /* Apply new CPPR */ 513 /* Apply new CPPR */
420 xc->hw_cppr = cppr; 514 xc->hw_cppr = cppr;
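scan_for_rerouted_irqs() above walks an XIVE event queue whose 32-bit entries carry a generation bit in bit 31; an entry is live while that bit differs from the scanner's toggle, and a rerouted entry is overwritten with XICS_DUMMY so later scans skip it. The stand-alone sketch below mirrors that walk over a plain in-memory array; the queue size and IRQ numbers are made up for the example.

#include <stdio.h>
#include <stdint.h>

#define QUEUE_ENTRIES 8
#define QUEUE_MASK    (QUEUE_ENTRIES - 1)
#define XICS_DUMMY    1          /* placeholder IRQ number */

struct queue {
        uint32_t entries[QUEUE_ENTRIES];
        uint32_t idx;
        uint32_t toggle;
};

static void scan(struct queue *q, uint32_t rerouted_irq)
{
        uint32_t idx = q->idx, toggle = q->toggle;

        for (;;) {
                uint32_t entry = q->entries[idx];

                if ((entry >> 31) == toggle)   /* generation match: end of data */
                        break;

                uint32_t irq = entry & 0x7fffffff;
                if (irq == rerouted_irq) {
                        /* Keep the generation bit, replace the payload. */
                        q->entries[idx] = (entry & 0x80000000u) | XICS_DUMMY;
                        printf("irq %u at slot %u replaced with dummy\n", irq, idx);
                }

                idx = (idx + 1) & QUEUE_MASK;
                if (idx == 0)
                        toggle ^= 1;           /* wrapped: flip the generation */
        }
}

int main(void)
{
        struct queue q = { .idx = 0, .toggle = 0 };

        /* Producer wrote three entries with generation bit 1 (!= toggle 0). */
        q.entries[0] = 0x80000000u | 23;
        q.entries[1] = 0x80000000u | 42;
        q.entries[2] = 0x80000000u | 23;

        scan(&q, 23);
        return 0;
}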
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 8961e3970901..969882b54266 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -578,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
578 578
579 gpa = READ_ONCE(scb_o->itdba) & ~0xffUL; 579 gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
580 if (gpa && (scb_s->ecb & ECB_TE)) { 580 if (gpa && (scb_s->ecb & ECB_TE)) {
581 if (!(gpa & ~0x1fffU)) { 581 if (!(gpa & ~0x1fffUL)) {
582 rc = set_validity_icpt(scb_s, 0x0080U); 582 rc = set_validity_icpt(scb_s, 0x0080U);
583 goto unpin; 583 goto unpin;
584 } 584 }
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 78decc3e3067..38276f58d3bf 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -942,12 +942,8 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
942 {} 942 {}
943}; 943};
944 944
945/* Only list CPUs which speculate but are non susceptible to SSB */
945static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { 946static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
946 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
947 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
948 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
949 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
950 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
951 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, 947 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
952 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, 948 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
953 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, 949 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
@@ -955,14 +951,10 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
955 { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, 951 { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
956 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, 952 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
957 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, 953 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
958 { X86_VENDOR_CENTAUR, 5, },
959 { X86_VENDOR_INTEL, 5, },
960 { X86_VENDOR_NSC, 5, },
961 { X86_VENDOR_AMD, 0x12, }, 954 { X86_VENDOR_AMD, 0x12, },
962 { X86_VENDOR_AMD, 0x11, }, 955 { X86_VENDOR_AMD, 0x11, },
963 { X86_VENDOR_AMD, 0x10, }, 956 { X86_VENDOR_AMD, 0x10, },
964 { X86_VENDOR_AMD, 0xf, }, 957 { X86_VENDOR_AMD, 0xf, },
965 { X86_VENDOR_ANY, 4, },
966 {} 958 {}
967}; 959};
968 960
@@ -970,6 +962,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
970{ 962{
971 u64 ia32_cap = 0; 963 u64 ia32_cap = 0;
972 964
965 if (x86_match_cpu(cpu_no_speculation))
966 return;
967
968 setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
969 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
970
973 if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) 971 if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
974 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); 972 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
975 973
@@ -977,12 +975,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
977 !(ia32_cap & ARCH_CAP_SSB_NO)) 975 !(ia32_cap & ARCH_CAP_SSB_NO))
978 setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); 976 setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
979 977
980 if (x86_match_cpu(cpu_no_speculation))
981 return;
982
983 setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
984 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
985
986 if (x86_match_cpu(cpu_no_meltdown)) 978 if (x86_match_cpu(cpu_no_meltdown))
987 return; 979 return;
988 980
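With the reordering above, cpu_set_bug_bits() first filters out CPUs that do not speculate at all, marks every remaining CPU as affected by Spectre v1/v2, and only then consults the narrower store-bypass and Meltdown exemption lists. The simplified sketch below summarises that control flow; the match helpers are stubs standing in for the x86_match_cpu() tables, and the ARCH_CAPABILITIES checks are omitted.

#include <stdio.h>
#include <stdbool.h>

enum bug { SPECTRE_V1, SPECTRE_V2, SPEC_STORE_BYPASS, MELTDOWN, NR_BUGS };

/* Stubs standing in for the x86_match_cpu() table lookups. */
static bool matches_no_speculation(const char *cpu) { (void)cpu; return false; }
static bool matches_no_ssb(const char *cpu)         { (void)cpu; return false; }
static bool matches_no_meltdown(const char *cpu)    { (void)cpu; return false; }

static void set_bug_bits(const char *cpu, bool bugs[NR_BUGS])
{
        if (matches_no_speculation(cpu))
                return;                 /* no speculation, nothing to flag */

        bugs[SPECTRE_V1] = bugs[SPECTRE_V2] = true;

        if (!matches_no_ssb(cpu))
                bugs[SPEC_STORE_BYPASS] = true;

        if (matches_no_meltdown(cpu))
                return;

        bugs[MELTDOWN] = true;
}

int main(void)
{
        bool bugs[NR_BUGS] = { false };

        set_bug_bits("example-cpu", bugs);
        for (int i = 0; i < NR_BUGS; i++)
                printf("bug %d set: %d\n", i, bugs[i]);
        return 0;
}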
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index ced851169730..92bf2f2e7cdd 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -407,8 +407,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
407 407
408 /* cpuid 7.0.edx*/ 408 /* cpuid 7.0.edx*/
409 const u32 kvm_cpuid_7_0_edx_x86_features = 409 const u32 kvm_cpuid_7_0_edx_x86_features =
410 F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) | 410 F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
411 F(ARCH_CAPABILITIES); 411 F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
412 412
413 /* all calls to cpuid_count() should be made on the same cpu */ 413 /* all calls to cpuid_count() should be made on the same cpu */
414 get_cpu(); 414 get_cpu();
@@ -495,6 +495,11 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
495 entry->ecx &= ~F(PKU); 495 entry->ecx &= ~F(PKU);
496 entry->edx &= kvm_cpuid_7_0_edx_x86_features; 496 entry->edx &= kvm_cpuid_7_0_edx_x86_features;
497 cpuid_mask(&entry->edx, CPUID_7_EDX); 497 cpuid_mask(&entry->edx, CPUID_7_EDX);
498 /*
499 * We emulate ARCH_CAPABILITIES in software even
500 * if the host doesn't support it.
501 */
502 entry->edx |= F(ARCH_CAPABILITIES);
498 } else { 503 } else {
499 entry->ebx = 0; 504 entry->ebx = 0;
500 entry->ecx = 0; 505 entry->ecx = 0;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 5708e951a5c6..46ff64da44ca 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1260,14 +1260,18 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
1260 } 1260 }
1261} 1261}
1262 1262
1263static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) 1263static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
1264{ 1264{
1265 struct kvm_run *run = vcpu->run; 1265 kvm_hv_hypercall_set_result(vcpu, result);
1266 1266 ++vcpu->stat.hypercalls;
1267 kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
1268 return kvm_skip_emulated_instruction(vcpu); 1267 return kvm_skip_emulated_instruction(vcpu);
1269} 1268}
1270 1269
1270static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
1271{
1272 return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
1273}
1274
1271static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) 1275static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
1272{ 1276{
1273 struct eventfd_ctx *eventfd; 1277 struct eventfd_ctx *eventfd;
@@ -1350,7 +1354,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
1350 /* Hypercall continuation is not supported yet */ 1354 /* Hypercall continuation is not supported yet */
1351 if (rep_cnt || rep_idx) { 1355 if (rep_cnt || rep_idx) {
1352 ret = HV_STATUS_INVALID_HYPERCALL_CODE; 1356 ret = HV_STATUS_INVALID_HYPERCALL_CODE;
1353 goto set_result; 1357 goto out;
1354 } 1358 }
1355 1359
1356 switch (code) { 1360 switch (code) {
@@ -1381,9 +1385,8 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
1381 break; 1385 break;
1382 } 1386 }
1383 1387
1384set_result: 1388out:
1385 kvm_hv_hypercall_set_result(vcpu, ret); 1389 return kvm_hv_hypercall_complete(vcpu, ret);
1386 return 1;
1387} 1390}
1388 1391
1389void kvm_hv_init_vm(struct kvm *kvm) 1392void kvm_hv_init_vm(struct kvm *kvm)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b74c9c1405b9..3773c4625114 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1522,11 +1522,23 @@ static bool set_target_expiration(struct kvm_lapic *apic)
1522 1522
1523static void advance_periodic_target_expiration(struct kvm_lapic *apic) 1523static void advance_periodic_target_expiration(struct kvm_lapic *apic)
1524{ 1524{
1525 apic->lapic_timer.tscdeadline += 1525 ktime_t now = ktime_get();
1526 nsec_to_cycles(apic->vcpu, apic->lapic_timer.period); 1526 u64 tscl = rdtsc();
1527 ktime_t delta;
1528
1529 /*
1530 * Synchronize both deadlines to the same time source or
1531 * differences in the periods (caused by differences in the
1532 * underlying clocks or numerical approximation errors) will
1533 * cause the two to drift apart over time as the errors
1534 * accumulate.
1535 */
1527 apic->lapic_timer.target_expiration = 1536 apic->lapic_timer.target_expiration =
1528 ktime_add_ns(apic->lapic_timer.target_expiration, 1537 ktime_add_ns(apic->lapic_timer.target_expiration,
1529 apic->lapic_timer.period); 1538 apic->lapic_timer.period);
1539 delta = ktime_sub(apic->lapic_timer.target_expiration, now);
1540 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1541 nsec_to_cycles(apic->vcpu, delta);
1530} 1542}
1531 1543
1532static void start_sw_period(struct kvm_lapic *apic) 1544static void start_sw_period(struct kvm_lapic *apic)
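The comment added to advance_periodic_target_expiration() explains why the TSC deadline is now rederived from the nanosecond target each period instead of being advanced by its own rounded period. The toy program below makes that drift concrete; the TSC frequency and timer period are assumed values picked so the rounding error is visible, not numbers from KVM.

#include <stdio.h>
#include <stdint.h>

#define TSC_KHZ    2893301ULL   /* assumed ~2.9 GHz TSC */
#define PERIOD_NS  1000ULL      /* 1 us periodic APIC timer */

static uint64_t ns_to_tsc(uint64_t ns)
{
        return ns * TSC_KHZ / 1000000ULL;   /* truncates, like nsec_to_cycles() */
}

int main(void)
{
        uint64_t target_ns = 0, drifting_tsc = 0;
        uint64_t period_tsc = ns_to_tsc(PERIOD_NS);

        for (int i = 0; i < 1000000; i++) {
                target_ns    += PERIOD_NS;    /* exact nanosecond bookkeeping */
                drifting_tsc += period_tsc;   /* old scheme: add rounded period */
        }

        /* New scheme: rederive the TSC deadline from the ns target each time. */
        uint64_t resynced_tsc = ns_to_tsc(target_ns);

        printf("drift after 1e6 periods: %llu cycles\n",
               (unsigned long long)(resynced_tsc - drifting_tsc));
        return 0;
}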
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 22a183aac1c6..71e7cda6d014 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6671,11 +6671,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
6671 unsigned long nr, a0, a1, a2, a3, ret; 6671 unsigned long nr, a0, a1, a2, a3, ret;
6672 int op_64_bit; 6672 int op_64_bit;
6673 6673
6674 if (kvm_hv_hypercall_enabled(vcpu->kvm)) { 6674 if (kvm_hv_hypercall_enabled(vcpu->kvm))
6675 if (!kvm_hv_hypercall(vcpu)) 6675 return kvm_hv_hypercall(vcpu);
6676 return 0;
6677 goto out;
6678 }
6679 6676
6680 nr = kvm_register_read(vcpu, VCPU_REGS_RAX); 6677 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
6681 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); 6678 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
@@ -6696,7 +6693,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
6696 6693
6697 if (kvm_x86_ops->get_cpl(vcpu) != 0) { 6694 if (kvm_x86_ops->get_cpl(vcpu) != 0) {
6698 ret = -KVM_EPERM; 6695 ret = -KVM_EPERM;
6699 goto out_error; 6696 goto out;
6700 } 6697 }
6701 6698
6702 switch (nr) { 6699 switch (nr) {
@@ -6716,12 +6713,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
6716 ret = -KVM_ENOSYS; 6713 ret = -KVM_ENOSYS;
6717 break; 6714 break;
6718 } 6715 }
6719out_error: 6716out:
6720 if (!op_64_bit) 6717 if (!op_64_bit)
6721 ret = (u32)ret; 6718 ret = (u32)ret;
6722 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); 6719 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
6723 6720
6724out:
6725 ++vcpu->stat.hypercalls; 6721 ++vcpu->stat.hypercalls;
6726 return kvm_skip_emulated_instruction(vcpu); 6722 return kvm_skip_emulated_instruction(vcpu);
6727} 6723}
@@ -7980,6 +7976,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
7980{ 7976{
7981 struct msr_data apic_base_msr; 7977 struct msr_data apic_base_msr;
7982 int mmu_reset_needed = 0; 7978 int mmu_reset_needed = 0;
7979 int cpuid_update_needed = 0;
7983 int pending_vec, max_bits, idx; 7980 int pending_vec, max_bits, idx;
7984 struct desc_ptr dt; 7981 struct desc_ptr dt;
7985 int ret = -EINVAL; 7982 int ret = -EINVAL;
@@ -8018,8 +8015,10 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
8018 vcpu->arch.cr0 = sregs->cr0; 8015 vcpu->arch.cr0 = sregs->cr0;
8019 8016
8020 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 8017 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
8018 cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
8019 (X86_CR4_OSXSAVE | X86_CR4_PKE));
8021 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 8020 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
8022 if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE)) 8021 if (cpuid_update_needed)
8023 kvm_update_cpuid(vcpu); 8022 kvm_update_cpuid(vcpu);
8024 8023
8025 idx = srcu_read_lock(&vcpu->kvm->srcu); 8024 idx = srcu_read_lock(&vcpu->kvm->srcu);
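The __set_sregs() hunk only calls kvm_update_cpuid() when a CPUID-affecting CR4 bit actually changes, rather than whenever OSXSAVE or PKE happens to be set in the new value. The XOR-and-mask test it relies on is shown in isolation below; the helper name is invented for the example.

#include <stdio.h>
#include <stdint.h>

#define X86_CR4_OSXSAVE (1ULL << 18)
#define X86_CR4_PKE     (1ULL << 22)

/* True when a bit that influences the guest's CPUID view has toggled. */
static int cpuid_affecting_bits_changed(uint64_t old_cr4, uint64_t new_cr4)
{
        return ((old_cr4 ^ new_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE)) != 0;
}

int main(void)
{
        uint64_t old_cr4 = X86_CR4_OSXSAVE;

        /* Same value written back: no CPUID recomputation needed. */
        printf("%d\n", cpuid_affecting_bits_changed(old_cr4, old_cr4));

        /* PKE toggled: kvm_update_cpuid() would have to run. */
        printf("%d\n", cpuid_affecting_bits_changed(old_cr4, old_cr4 | X86_CR4_PKE));
        return 0;
}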
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 7a3a580821e0..a5e821d09656 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -490,7 +490,8 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
490 return 0; 490 return 0;
491} 491}
492 492
493int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages) 493int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages,
494 bool check_nid)
494{ 495{
495 unsigned long end_pfn = start_pfn + nr_pages; 496 unsigned long end_pfn = start_pfn + nr_pages;
496 unsigned long pfn; 497 unsigned long pfn;
@@ -514,7 +515,7 @@ int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
514 515
515 mem_blk = find_memory_block_hinted(mem_sect, mem_blk); 516 mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
516 517
517 ret = register_mem_sect_under_node(mem_blk, nid, true); 518 ret = register_mem_sect_under_node(mem_blk, nid, check_nid);
518 if (!err) 519 if (!err)
519 err = ret; 520 err = ret;
520 521
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index f040aba48d50..27e9686b6d3a 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -184,7 +184,7 @@ static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
184{ 184{
185 int i; 185 int i;
186 static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"}; 186 static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
187 char interrupts[20]; 187 char interrupts[25];
188 char *ints = interrupts; 188 char *ints = interrupts;
189 189
190 for (i = 0; i < ARRAY_SIZE(irq_name); i++) 190 for (i = 0; i < ARRAY_SIZE(irq_name); i++)
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index dfbd894d5bb7..4e24e591ae74 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -147,7 +147,7 @@ static u32 smc(u32 cmd_addr)
147 "smc #0 @ switch to secure world\n" 147 "smc #0 @ switch to secure world\n"
148 : "=r" (r0) 148 : "=r" (r0)
149 : "r" (r0), "r" (r1), "r" (r2) 149 : "r" (r0), "r" (r1), "r" (r2)
150 : "r3"); 150 : "r3", "r12");
151 } while (r0 == QCOM_SCM_INTERRUPTED); 151 } while (r0 == QCOM_SCM_INTERRUPTED);
152 152
153 return r0; 153 return r0;
@@ -263,7 +263,7 @@ static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
263 "smc #0 @ switch to secure world\n" 263 "smc #0 @ switch to secure world\n"
264 : "=r" (r0) 264 : "=r" (r0)
265 : "r" (r0), "r" (r1), "r" (r2) 265 : "r" (r0), "r" (r1), "r" (r2)
266 : "r3"); 266 : "r3", "r12");
267 return r0; 267 return r0;
268} 268}
269 269
@@ -298,7 +298,7 @@ static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
298 "smc #0 @ switch to secure world\n" 298 "smc #0 @ switch to secure world\n"
299 : "=r" (r0) 299 : "=r" (r0)
300 : "r" (r0), "r" (r1), "r" (r2), "r" (r3) 300 : "r" (r0), "r" (r1), "r" (r2), "r" (r3)
301 ); 301 : "r12");
302 return r0; 302 return r0;
303} 303}
304 304
@@ -328,7 +328,7 @@ u32 qcom_scm_get_version(void)
328 "smc #0 @ switch to secure world\n" 328 "smc #0 @ switch to secure world\n"
329 : "=r" (r0), "=r" (r1) 329 : "=r" (r0), "=r" (r1)
330 : "r" (r0), "r" (r1) 330 : "r" (r0), "r" (r1)
331 : "r2", "r3"); 331 : "r2", "r3", "r12");
332 } while (r0 == QCOM_SCM_INTERRUPTED); 332 } while (r0 == QCOM_SCM_INTERRUPTED);
333 333
334 version = r1; 334 version = r1;
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 3d2d3bbd1342..155ad840f3c5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -88,6 +88,9 @@ static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
88 const struct drm_display_mode *panel_mode; 88 const struct drm_display_mode *panel_mode;
89 struct drm_crtc_state *crtc_state; 89 struct drm_crtc_state *crtc_state;
90 90
91 if (!state->crtc)
92 return 0;
93
91 if (list_empty(&connector->modes)) { 94 if (list_empty(&connector->modes)) {
92 dev_dbg(lvds->dev, "connector: empty modes list\n"); 95 dev_dbg(lvds->dev, "connector: empty modes list\n");
93 return -EINVAL; 96 return -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 70e1a8820a7c..8b770a8e02cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1278,8 +1278,6 @@ static void vmw_master_drop(struct drm_device *dev,
1278 dev_priv->active_master = &dev_priv->fbdev_master; 1278 dev_priv->active_master = &dev_priv->fbdev_master;
1279 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 1279 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
1280 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 1280 ttm_vt_unlock(&dev_priv->fbdev_master.lock);
1281
1282 vmw_fb_refresh(dev_priv);
1283} 1281}
1284 1282
1285/** 1283/**
@@ -1483,7 +1481,6 @@ static int vmw_pm_freeze(struct device *kdev)
1483 vmw_kms_resume(dev); 1481 vmw_kms_resume(dev);
1484 if (dev_priv->enable_fb) 1482 if (dev_priv->enable_fb)
1485 vmw_fb_on(dev_priv); 1483 vmw_fb_on(dev_priv);
1486 vmw_fb_refresh(dev_priv);
1487 return -EBUSY; 1484 return -EBUSY;
1488 } 1485 }
1489 1486
@@ -1523,8 +1520,6 @@ static int vmw_pm_restore(struct device *kdev)
1523 if (dev_priv->enable_fb) 1520 if (dev_priv->enable_fb)
1524 vmw_fb_on(dev_priv); 1521 vmw_fb_on(dev_priv);
1525 1522
1526 vmw_fb_refresh(dev_priv);
1527
1528 return 0; 1523 return 0;
1529} 1524}
1530 1525
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index f34f368c1a2e..5fcbe1620d50 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -910,7 +910,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv);
910int vmw_fb_close(struct vmw_private *dev_priv); 910int vmw_fb_close(struct vmw_private *dev_priv);
911int vmw_fb_off(struct vmw_private *vmw_priv); 911int vmw_fb_off(struct vmw_private *vmw_priv);
912int vmw_fb_on(struct vmw_private *vmw_priv); 912int vmw_fb_on(struct vmw_private *vmw_priv);
913void vmw_fb_refresh(struct vmw_private *vmw_priv);
914 913
915/** 914/**
916 * Kernel modesetting - vmwgfx_kms.c 915 * Kernel modesetting - vmwgfx_kms.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index ba0cdb743c3e..54e300365a5c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -866,21 +866,13 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
866 spin_lock_irqsave(&par->dirty.lock, flags); 866 spin_lock_irqsave(&par->dirty.lock, flags);
867 par->dirty.active = true; 867 par->dirty.active = true;
868 spin_unlock_irqrestore(&par->dirty.lock, flags); 868 spin_unlock_irqrestore(&par->dirty.lock, flags);
869
870 return 0;
871}
872 869
873/** 870 /*
874 * vmw_fb_refresh - Refresh fb display 871 * Need to reschedule a dirty update, because otherwise that's
875 * 872 * only done in dirty_mark() if the previous coalesced
876 * @vmw_priv: Pointer to device private 873 * dirty region was empty.
877 * 874 */
878 * Call into kms to show the fbdev display(s). 875 schedule_delayed_work(&par->local_work, 0);
879 */
880void vmw_fb_refresh(struct vmw_private *vmw_priv)
881{
882 if (!vmw_priv->fb_info)
883 return;
884 876
885 vmw_fb_set_par(vmw_priv->fb_info); 877 return 0;
886} 878}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index cdff99211602..21d746bdc922 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -329,8 +329,6 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
329 struct rpc_channel channel; 329 struct rpc_channel channel;
330 char *msg, *reply = NULL; 330 char *msg, *reply = NULL;
331 size_t reply_len = 0; 331 size_t reply_len = 0;
332 int ret = 0;
333
334 332
335 if (!vmw_msg_enabled) 333 if (!vmw_msg_enabled)
336 return -ENODEV; 334 return -ENODEV;
@@ -344,15 +342,14 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
344 return -ENOMEM; 342 return -ENOMEM;
345 } 343 }
346 344
347 if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) || 345 if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
348 vmw_send_msg(&channel, msg) || 346 goto out_open;
349 vmw_recv_msg(&channel, (void *) &reply, &reply_len) ||
350 vmw_close_channel(&channel)) {
351 DRM_ERROR("Failed to get %s", guest_info_param);
352 347
353 ret = -EINVAL; 348 if (vmw_send_msg(&channel, msg) ||
354 } 349 vmw_recv_msg(&channel, (void *) &reply, &reply_len))
350 goto out_msg;
355 351
352 vmw_close_channel(&channel);
356 if (buffer && reply && reply_len > 0) { 353 if (buffer && reply && reply_len > 0) {
357 /* Remove reply code, which are the first 2 characters of 354 /* Remove reply code, which are the first 2 characters of
358 * the reply 355 * the reply
@@ -369,7 +366,17 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
369 kfree(reply); 366 kfree(reply);
370 kfree(msg); 367 kfree(msg);
371 368
372 return ret; 369 return 0;
370
371out_msg:
372 vmw_close_channel(&channel);
373 kfree(reply);
374out_open:
375 *length = 0;
376 kfree(msg);
377 DRM_ERROR("Failed to get %s", guest_info_param);
378
379 return -EINVAL;
373} 380}
374 381
375 382
@@ -400,15 +407,22 @@ int vmw_host_log(const char *log)
400 return -ENOMEM; 407 return -ENOMEM;
401 } 408 }
402 409
403 if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) || 410 if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
404 vmw_send_msg(&channel, msg) || 411 goto out_open;
405 vmw_close_channel(&channel)) {
406 DRM_ERROR("Failed to send log\n");
407 412
408 ret = -EINVAL; 413 if (vmw_send_msg(&channel, msg))
409 } 414 goto out_msg;
410 415
416 vmw_close_channel(&channel);
411 kfree(msg); 417 kfree(msg);
412 418
413 return ret; 419 return 0;
420
421out_msg:
422 vmw_close_channel(&channel);
423out_open:
424 kfree(msg);
425 DRM_ERROR("Failed to send log\n");
426
427 return -EINVAL;
414} 428}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
index 557a033fb610..8545488aa0cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -135,17 +135,24 @@
135 135
136#else 136#else
137 137
138/* In the 32-bit version of this macro, we use "m" because there is no 138/*
139 * more register left for bp 139 * In the 32-bit version of this macro, we store bp in a memory location
 140 * because we've run out of registers.
141 * Now we can't reference that memory location while we've modified
142 * %esp or %ebp, so we first push it on the stack, just before we push
143 * %ebp, and then when we need it we read it from the stack where we
144 * just pushed it.
140 */ 145 */
141#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \ 146#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
142 port_num, magic, bp, \ 147 port_num, magic, bp, \
143 eax, ebx, ecx, edx, si, di) \ 148 eax, ebx, ecx, edx, si, di) \
144({ \ 149({ \
145 asm volatile ("push %%ebp;" \ 150 asm volatile ("push %12;" \
146 "mov %12, %%ebp;" \ 151 "push %%ebp;" \
152 "mov 0x04(%%esp), %%ebp;" \
147 "rep outsb;" \ 153 "rep outsb;" \
148 "pop %%ebp;" : \ 154 "pop %%ebp;" \
155 "add $0x04, %%esp;" : \
149 "=a"(eax), \ 156 "=a"(eax), \
150 "=b"(ebx), \ 157 "=b"(ebx), \
151 "=c"(ecx), \ 158 "=c"(ecx), \
@@ -167,10 +174,12 @@
167 port_num, magic, bp, \ 174 port_num, magic, bp, \
168 eax, ebx, ecx, edx, si, di) \ 175 eax, ebx, ecx, edx, si, di) \
169({ \ 176({ \
170 asm volatile ("push %%ebp;" \ 177 asm volatile ("push %12;" \
171 "mov %12, %%ebp;" \ 178 "push %%ebp;" \
179 "mov 0x04(%%esp), %%ebp;" \
172 "rep insb;" \ 180 "rep insb;" \
173 "pop %%ebp" : \ 181 "pop %%ebp;" \
182 "add $0x04, %%esp;" : \
174 "=a"(eax), \ 183 "=a"(eax), \
175 "=b"(ebx), \ 184 "=b"(ebx), \
176 "=c"(ecx), \ 185 "=c"(ecx), \
diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
index 944a7f338099..1b25d8bc153a 100644
--- a/drivers/isdn/hardware/eicon/diva.c
+++ b/drivers/isdn/hardware/eicon/diva.c
@@ -388,10 +388,10 @@ void divasa_xdi_driver_unload(void)
388** Receive and process command from user mode utility 388** Receive and process command from user mode utility
389*/ 389*/
390void *diva_xdi_open_adapter(void *os_handle, const void __user *src, 390void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
391 int length, 391 int length, void *mptr,
392 divas_xdi_copy_from_user_fn_t cp_fn) 392 divas_xdi_copy_from_user_fn_t cp_fn)
393{ 393{
394 diva_xdi_um_cfg_cmd_t msg; 394 diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
395 diva_os_xdi_adapter_t *a = NULL; 395 diva_os_xdi_adapter_t *a = NULL;
396 diva_os_spin_lock_magic_t old_irql; 396 diva_os_spin_lock_magic_t old_irql;
397 struct list_head *tmp; 397 struct list_head *tmp;
@@ -401,21 +401,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
401 length, sizeof(diva_xdi_um_cfg_cmd_t))) 401 length, sizeof(diva_xdi_um_cfg_cmd_t)))
402 return NULL; 402 return NULL;
403 } 403 }
404 if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) { 404 if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
405 DBG_ERR(("A: A(?) open, write error")) 405 DBG_ERR(("A: A(?) open, write error"))
406 return NULL; 406 return NULL;
407 } 407 }
408 diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter"); 408 diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
409 list_for_each(tmp, &adapter_queue) { 409 list_for_each(tmp, &adapter_queue) {
410 a = list_entry(tmp, diva_os_xdi_adapter_t, link); 410 a = list_entry(tmp, diva_os_xdi_adapter_t, link);
411 if (a->controller == (int)msg.adapter) 411 if (a->controller == (int)msg->adapter)
412 break; 412 break;
413 a = NULL; 413 a = NULL;
414 } 414 }
415 diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter"); 415 diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");
416 416
417 if (!a) { 417 if (!a) {
418 DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter)) 418 DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
419 } 419 }
420 420
421 return (a); 421 return (a);
@@ -437,8 +437,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle)
437 437
438int 438int
439diva_xdi_write(void *adapter, void *os_handle, const void __user *src, 439diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
440 int length, divas_xdi_copy_from_user_fn_t cp_fn) 440 int length, void *mptr,
441 divas_xdi_copy_from_user_fn_t cp_fn)
441{ 442{
443 diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
442 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter; 444 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
443 void *data; 445 void *data;
444 446
@@ -459,7 +461,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
459 return (-2); 461 return (-2);
460 } 462 }
461 463
462 length = (*cp_fn) (os_handle, data, src, length); 464 if (msg) {
465 *(diva_xdi_um_cfg_cmd_t *)data = *msg;
466 length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
467 src + sizeof(*msg), length - sizeof(*msg));
468 } else {
469 length = (*cp_fn) (os_handle, data, src, length);
470 }
463 if (length > 0) { 471 if (length > 0) {
464 if ((*(a->interface.cmd_proc)) 472 if ((*(a->interface.cmd_proc))
465 (a, (diva_xdi_um_cfg_cmd_t *) data, length)) { 473 (a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h
index b067032093a8..1ad76650fbf9 100644
--- a/drivers/isdn/hardware/eicon/diva.h
+++ b/drivers/isdn/hardware/eicon/diva.h
@@ -20,10 +20,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
20 int max_length, divas_xdi_copy_to_user_fn_t cp_fn); 20 int max_length, divas_xdi_copy_to_user_fn_t cp_fn);
21 21
22int diva_xdi_write(void *adapter, void *os_handle, const void __user *src, 22int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
23 int length, divas_xdi_copy_from_user_fn_t cp_fn); 23 int length, void *msg,
24 divas_xdi_copy_from_user_fn_t cp_fn);
24 25
25void *diva_xdi_open_adapter(void *os_handle, const void __user *src, 26void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
26 int length, 27 int length, void *msg,
27 divas_xdi_copy_from_user_fn_t cp_fn); 28 divas_xdi_copy_from_user_fn_t cp_fn);
28 29
29void diva_xdi_close_adapter(void *adapter, void *os_handle); 30void diva_xdi_close_adapter(void *adapter, void *os_handle);
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index b9980e84f9db..b6a3950b2564 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file)
591static ssize_t divas_write(struct file *file, const char __user *buf, 591static ssize_t divas_write(struct file *file, const char __user *buf,
592 size_t count, loff_t *ppos) 592 size_t count, loff_t *ppos)
593{ 593{
594 diva_xdi_um_cfg_cmd_t msg;
594 int ret = -EINVAL; 595 int ret = -EINVAL;
595 596
596 if (!file->private_data) { 597 if (!file->private_data) {
597 file->private_data = diva_xdi_open_adapter(file, buf, 598 file->private_data = diva_xdi_open_adapter(file, buf,
598 count, 599 count, &msg,
599 xdi_copy_from_user); 600 xdi_copy_from_user);
600 } 601 if (!file->private_data)
601 if (!file->private_data) { 602 return (-ENODEV);
602 return (-ENODEV); 603 ret = diva_xdi_write(file->private_data, file,
604 buf, count, &msg, xdi_copy_from_user);
605 } else {
606 ret = diva_xdi_write(file->private_data, file,
607 buf, count, NULL, xdi_copy_from_user);
603 } 608 }
604 609
605 ret = diva_xdi_write(file->private_data, file,
606 buf, count, xdi_copy_from_user);
607 switch (ret) { 610 switch (ret) {
608 case -1: /* Message should be removed from rx mailbox first */ 611 case -1: /* Message should be removed from rx mailbox first */
609 ret = -EBUSY; 612 ret = -EBUSY;
@@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf,
622static ssize_t divas_read(struct file *file, char __user *buf, 625static ssize_t divas_read(struct file *file, char __user *buf,
623 size_t count, loff_t *ppos) 626 size_t count, loff_t *ppos)
624{ 627{
628 diva_xdi_um_cfg_cmd_t msg;
625 int ret = -EINVAL; 629 int ret = -EINVAL;
626 630
627 if (!file->private_data) { 631 if (!file->private_data) {
628 file->private_data = diva_xdi_open_adapter(file, buf, 632 file->private_data = diva_xdi_open_adapter(file, buf,
629 count, 633 count, &msg,
630 xdi_copy_from_user); 634 xdi_copy_from_user);
631 } 635 }
632 if (!file->private_data) { 636 if (!file->private_data) {
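The diva changes make the caller capture the fixed-size configuration header once and pass it down, so diva_xdi_write() reuses that copy and fetches only the trailing payload from user space, closing the window in which the header could change after validation. A simplified illustration of the pattern follows; the types are invented and a plain memcpy() stands in for copy_from_user().

#include <stdio.h>
#include <string.h>

struct cmd_hdr {
        unsigned int adapter;
        unsigned int command;
};

/* Stand-in for copy_from_user(); here the "user" buffer is local memory. */
static int copy_in(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);
        return (int)len;
}

static int do_write(void *buf, size_t buflen, const struct cmd_hdr *hdr,
                    const void *user, size_t len)
{
        if (len < sizeof(*hdr) || len > buflen)
                return -1;

        if (hdr) {
                /* Reuse the header already captured by the caller... */
                memcpy(buf, hdr, sizeof(*hdr));
                /* ...and copy only the payload that follows it. */
                return (int)sizeof(*hdr) +
                       copy_in((char *)buf + sizeof(*hdr),
                               (const char *)user + sizeof(*hdr),
                               len - sizeof(*hdr));
        }
        return copy_in(buf, user, len);
}

int main(void)
{
        unsigned char user_msg[64] = { 0 };
        unsigned char kbuf[64];
        struct cmd_hdr hdr = { .adapter = 1, .command = 2 };

        memcpy(user_msg, &hdr, sizeof(hdr));            /* fake user message */
        printf("copied %d bytes\n",
               do_write(kbuf, sizeof(kbuf), &hdr, user_msg, sizeof(user_msg)));
        return 0;
}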
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 9e923cd1d80e..38a7586b00cc 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2485,7 +2485,7 @@ static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
2485 break; 2485 break;
2486 } 2486 }
2487 2487
2488 return 0; 2488 return ret;
2489} 2489}
2490 2490
2491#ifdef CONFIG_COMPAT 2491#ifdef CONFIG_COMPAT
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 0ef741bc515d..d0e83db42ae5 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -33,6 +33,8 @@ struct sdhci_iproc_host {
33 const struct sdhci_iproc_data *data; 33 const struct sdhci_iproc_data *data;
34 u32 shadow_cmd; 34 u32 shadow_cmd;
35 u32 shadow_blk; 35 u32 shadow_blk;
36 bool is_cmd_shadowed;
37 bool is_blk_shadowed;
36}; 38};
37 39
38#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18) 40#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
@@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)
48 50
49static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg) 51static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
50{ 52{
51 u32 val = sdhci_iproc_readl(host, (reg & ~3)); 53 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
52 u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff; 54 struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
55 u32 val;
56 u16 word;
57
58 if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
59 /* Get the saved transfer mode */
60 val = iproc_host->shadow_cmd;
61 } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
62 iproc_host->is_blk_shadowed) {
63 /* Get the saved block info */
64 val = iproc_host->shadow_blk;
65 } else {
66 val = sdhci_iproc_readl(host, (reg & ~3));
67 }
68 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
53 return word; 69 return word;
54} 70}
55 71
@@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
105 121
106 if (reg == SDHCI_COMMAND) { 122 if (reg == SDHCI_COMMAND) {
107 /* Write the block now as we are issuing a command */ 123 /* Write the block now as we are issuing a command */
108 if (iproc_host->shadow_blk != 0) { 124 if (iproc_host->is_blk_shadowed) {
109 sdhci_iproc_writel(host, iproc_host->shadow_blk, 125 sdhci_iproc_writel(host, iproc_host->shadow_blk,
110 SDHCI_BLOCK_SIZE); 126 SDHCI_BLOCK_SIZE);
111 iproc_host->shadow_blk = 0; 127 iproc_host->is_blk_shadowed = false;
112 } 128 }
113 oldval = iproc_host->shadow_cmd; 129 oldval = iproc_host->shadow_cmd;
114 } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { 130 iproc_host->is_cmd_shadowed = false;
131 } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
132 iproc_host->is_blk_shadowed) {
115 /* Block size and count are stored in shadow reg */ 133 /* Block size and count are stored in shadow reg */
116 oldval = iproc_host->shadow_blk; 134 oldval = iproc_host->shadow_blk;
117 } else { 135 } else {
@@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
123 if (reg == SDHCI_TRANSFER_MODE) { 141 if (reg == SDHCI_TRANSFER_MODE) {
124 /* Save the transfer mode until the command is issued */ 142 /* Save the transfer mode until the command is issued */
125 iproc_host->shadow_cmd = newval; 143 iproc_host->shadow_cmd = newval;
144 iproc_host->is_cmd_shadowed = true;
126 } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { 145 } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
127 /* Save the block info until the command is issued */ 146 /* Save the block info until the command is issued */
128 iproc_host->shadow_blk = newval; 147 iproc_host->shadow_blk = newval;
148 iproc_host->is_blk_shadowed = true;
129 } else { 149 } else {
130 /* Command or other regular 32-bit write */ 150 /* Command or other regular 32-bit write */
131 sdhci_iproc_writel(host, newval, reg & ~3); 151 sdhci_iproc_writel(host, newval, reg & ~3);
@@ -166,7 +186,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {
166 186
167static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = { 187static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
168 .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, 188 .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
169 .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, 189 .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
170 .ops = &sdhci_iproc_32only_ops, 190 .ops = &sdhci_iproc_32only_ops,
171}; 191};
172 192
@@ -206,7 +226,6 @@ static const struct sdhci_iproc_data iproc_data = {
206 .caps1 = SDHCI_DRIVER_TYPE_C | 226 .caps1 = SDHCI_DRIVER_TYPE_C |
207 SDHCI_DRIVER_TYPE_D | 227 SDHCI_DRIVER_TYPE_D |
208 SDHCI_SUPPORT_DDR50, 228 SDHCI_SUPPORT_DDR50,
209 .mmc_caps = MMC_CAP_1_8V_DDR,
210}; 229};
211 230
212static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = { 231static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
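The sdhci-iproc host can only issue 32-bit accesses, so 16-bit writes to the transfer-mode and block registers are buffered and flushed when the command register is written; the new is_cmd_shadowed/is_blk_shadowed flags let reads return the buffered value instead of stale hardware contents. The reduced model below sketches that shadow-register scheme; it is an illustration, not the driver.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct shadowed_host {
        uint32_t hw_xfer_blk;     /* pretend 32-bit hardware register */
        uint32_t shadow_blk;      /* buffered value */
        bool     is_blk_shadowed; /* a 16-bit write is pending */
};

static void write_block_size(struct shadowed_host *h, uint16_t val)
{
        /* Buffer the half-word instead of touching the hardware. */
        h->shadow_blk = (h->shadow_blk & 0xffff0000u) | val;
        h->is_blk_shadowed = true;
}

static uint16_t read_block_size(const struct shadowed_host *h)
{
        /* While a shadow is pending, reads must see the buffered value. */
        uint32_t v = h->is_blk_shadowed ? h->shadow_blk : h->hw_xfer_blk;
        return v & 0xffff;
}

static void issue_command(struct shadowed_host *h)
{
        if (h->is_blk_shadowed) {
                h->hw_xfer_blk = h->shadow_blk;   /* one 32-bit write */
                h->is_blk_shadowed = false;
        }
}

int main(void)
{
        struct shadowed_host h = { 0 };

        write_block_size(&h, 512);
        printf("before command: %u\n", (unsigned)read_block_size(&h)); /* shadow */
        issue_command(&h);
        printf("after command : %u\n", (unsigned)read_block_size(&h)); /* hardware */
        return 0;
}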
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index a561705f232c..be198cc0b10c 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1552,22 +1552,26 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1552 if (!ioaddr) { 1552 if (!ioaddr) {
1553 if (pcnet32_debug & NETIF_MSG_PROBE) 1553 if (pcnet32_debug & NETIF_MSG_PROBE)
1554 pr_err("card has no PCI IO resources, aborting\n"); 1554 pr_err("card has no PCI IO resources, aborting\n");
1555 return -ENODEV; 1555 err = -ENODEV;
1556 goto err_disable_dev;
1556 } 1557 }
1557 1558
1558 err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK); 1559 err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
1559 if (err) { 1560 if (err) {
1560 if (pcnet32_debug & NETIF_MSG_PROBE) 1561 if (pcnet32_debug & NETIF_MSG_PROBE)
1561 pr_err("architecture does not support 32bit PCI busmaster DMA\n"); 1562 pr_err("architecture does not support 32bit PCI busmaster DMA\n");
1562 return err; 1563 goto err_disable_dev;
1563 } 1564 }
1564 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { 1565 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
1565 if (pcnet32_debug & NETIF_MSG_PROBE) 1566 if (pcnet32_debug & NETIF_MSG_PROBE)
1566 pr_err("io address range already allocated\n"); 1567 pr_err("io address range already allocated\n");
1567 return -EBUSY; 1568 err = -EBUSY;
1569 goto err_disable_dev;
1568 } 1570 }
1569 1571
1570 err = pcnet32_probe1(ioaddr, 1, pdev); 1572 err = pcnet32_probe1(ioaddr, 1, pdev);
1573
1574err_disable_dev:
1571 if (err < 0) 1575 if (err < 0)
1572 pci_disable_device(pdev); 1576 pci_disable_device(pdev);
1573 1577
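The pcnet32 hunk converts early returns into jumps to a single err_disable_dev label so pci_disable_device() runs on every failure path after pci_enable_device(). Below is a small, self-contained sketch of that goto-unwind pattern, using invented stand-ins (enable_device, map_region, start_dma) rather than the real PCI helpers.

#include <stdio.h>
#include <stdlib.h>

static int enable_device(void)   { return 0; }
static void disable_device(void) { puts("device disabled"); }
static void *map_region(void)    { return malloc(16); }  /* may fail */
static int start_dma(void *r)    { (void)r; return -1; } /* pretend this fails */

static int probe(void)
{
	void *region;
	int err;

	err = enable_device();
	if (err)
		return err;

	region = map_region();
	if (!region) {
		err = -12;            /* -ENOMEM */
		goto err_disable;     /* single cleanup path */
	}

	err = start_dma(region);
	if (err)
		goto err_unmap;

	return 0;

err_unmap:
	free(region);
err_disable:
	disable_device();
	return err;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}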
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 81684acf52af..8a8b12b720ef 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2747,11 +2747,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2747 pci_set_master(pdev); 2747 pci_set_master(pdev);
2748 2748
2749 /* Query PCI controller on system for DMA addressing 2749 /* Query PCI controller on system for DMA addressing
2750 * limitation for the device. Try 64-bit first, and 2750 * limitation for the device. Try 47-bit first, and
2751 * fail to 32-bit. 2751 * fail to 32-bit.
2752 */ 2752 */
2753 2753
2754 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2754 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
2755 if (err) { 2755 if (err) {
2756 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2756 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2757 if (err) { 2757 if (err) {
@@ -2765,10 +2765,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2765 goto err_out_release_regions; 2765 goto err_out_release_regions;
2766 } 2766 }
2767 } else { 2767 } else {
2768 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2768 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
2769 if (err) { 2769 if (err) {
2770 dev_err(dev, "Unable to obtain %u-bit DMA " 2770 dev_err(dev, "Unable to obtain %u-bit DMA "
2771 "for consistent allocations, aborting\n", 64); 2771 "for consistent allocations, aborting\n", 47);
2772 goto err_out_release_regions; 2772 goto err_out_release_regions;
2773 } 2773 }
2774 using_dac = 1; 2774 using_dac = 1;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4604bc8eb5b..9d3eed46830d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. 3 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
3 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) 4 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index f81439796ac7..43d973215040 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -1,20 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Fast Ethernet Controller (ENET) PTP driver for MX6x. 3 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
3 * 4 *
4 * Copyright (C) 2012 Freescale Semiconductor, Inc. 5 * Copyright (C) 2012 Freescale Semiconductor, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */ 6 */
19 7
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4bb4646a5f92..5ec1185808e5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -796,9 +796,11 @@ static int ibmvnic_login(struct net_device *netdev)
796 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 796 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
797 unsigned long timeout = msecs_to_jiffies(30000); 797 unsigned long timeout = msecs_to_jiffies(30000);
798 int retry_count = 0; 798 int retry_count = 0;
799 bool retry;
799 int rc; 800 int rc;
800 801
801 do { 802 do {
803 retry = false;
802 if (retry_count > IBMVNIC_MAX_QUEUES) { 804 if (retry_count > IBMVNIC_MAX_QUEUES) {
803 netdev_warn(netdev, "Login attempts exceeded\n"); 805 netdev_warn(netdev, "Login attempts exceeded\n");
804 return -1; 806 return -1;
@@ -822,6 +824,9 @@ static int ibmvnic_login(struct net_device *netdev)
822 retry_count++; 824 retry_count++;
823 release_sub_crqs(adapter, 1); 825 release_sub_crqs(adapter, 1);
824 826
827 retry = true;
828 netdev_dbg(netdev,
829 "Received partial success, retrying...\n");
825 adapter->init_done_rc = 0; 830 adapter->init_done_rc = 0;
826 reinit_completion(&adapter->init_done); 831 reinit_completion(&adapter->init_done);
827 send_cap_queries(adapter); 832 send_cap_queries(adapter);
@@ -849,7 +854,7 @@ static int ibmvnic_login(struct net_device *netdev)
849 netdev_warn(netdev, "Adapter login failed\n"); 854 netdev_warn(netdev, "Adapter login failed\n");
850 return -1; 855 return -1;
851 } 856 }
852 } while (adapter->init_done_rc == PARTIALSUCCESS); 857 } while (retry);
853 858
854 /* handle pending MAC address changes after successful login */ 859 /* handle pending MAC address changes after successful login */
855 if (adapter->mac_change_pending) { 860 if (adapter->mac_change_pending) {
@@ -2617,18 +2622,21 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2617{ 2622{
2618 struct device *dev = &adapter->vdev->dev; 2623 struct device *dev = &adapter->vdev->dev;
2619 unsigned long rc; 2624 unsigned long rc;
2620 u64 val;
2621 2625
2622 if (scrq->hw_irq > 0x100000000ULL) { 2626 if (scrq->hw_irq > 0x100000000ULL) {
2623 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 2627 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2624 return 1; 2628 return 1;
2625 } 2629 }
2626 2630
2627 val = (0xff000000) | scrq->hw_irq; 2631 if (adapter->resetting &&
2628 rc = plpar_hcall_norets(H_EOI, val); 2632 adapter->reset_reason == VNIC_RESET_MOBILITY) {
2629 if (rc) 2633 u64 val = (0xff000000) | scrq->hw_irq;
2630 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", 2634
2631 val, rc); 2635 rc = plpar_hcall_norets(H_EOI, val);
2636 if (rc)
2637 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2638 val, rc);
2639 }
2632 2640
2633 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 2641 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2634 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 2642 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
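The ibmvnic login loop now records the retry decision in a local bool at the point where partial success is seen, instead of re-testing init_done_rc, which later steps reset. A tiny sketch of that retry-flag shape, with a made-up do_login_attempt() standing in for the adapter call:

#include <stdbool.h>
#include <stdio.h>

/* Returns 1 for "partial success, retry", 0 for "done". */
static int do_login_attempt(int attempt)
{
	return attempt < 2 ? 1 : 0;   /* pretend: partial success twice, then ok */
}

int main(void)
{
	int attempt = 0;
	bool retry;

	do {
		/* Decide retry when the condition is seen, not from shared state. */
		retry = false;
		if (do_login_attempt(attempt)) {
			retry = true;
			printf("attempt %d: partial success, retrying\n", attempt);
		}
		attempt++;
	} while (retry && attempt < 10);

	printf("logged in after %d attempt(s)\n", attempt);
	return 0;
}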
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index a822f7a56bc5..685337d58276 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -43,12 +43,12 @@
43#include "fw.h" 43#include "fw.h"
44 44
45/* 45/*
46 * We allocate in as big chunks as we can, up to a maximum of 256 KB 46 * We allocate in page size (default 4KB on many archs) chunks to avoid high
47 * per chunk. 47 * order memory allocations in fragmented/high-usage memory situations.
48 */ 48 */
49enum { 49enum {
50 MLX4_ICM_ALLOC_SIZE = 1 << 18, 50 MLX4_ICM_ALLOC_SIZE = PAGE_SIZE,
51 MLX4_TABLE_CHUNK_SIZE = 1 << 18 51 MLX4_TABLE_CHUNK_SIZE = PAGE_SIZE,
52}; 52};
53 53
54static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) 54static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -398,9 +398,11 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
398 u64 size; 398 u64 size;
399 399
400 obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; 400 obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
401 if (WARN_ON(!obj_per_chunk))
402 return -EINVAL;
401 num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; 403 num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
402 404
403 table->icm = kcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL); 405 table->icm = kvzalloc(num_icm * sizeof(*table->icm), GFP_KERNEL);
404 if (!table->icm) 406 if (!table->icm)
405 return -ENOMEM; 407 return -ENOMEM;
406 table->virt = virt; 408 table->virt = virt;
@@ -446,7 +448,7 @@ err:
446 mlx4_free_icm(dev, table->icm[i], use_coherent); 448 mlx4_free_icm(dev, table->icm[i], use_coherent);
447 } 449 }
448 450
449 kfree(table->icm); 451 kvfree(table->icm);
450 452
451 return -ENOMEM; 453 return -ENOMEM;
452} 454}
@@ -462,5 +464,5 @@ void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
462 mlx4_free_icm(dev, table->icm[i], table->coherent); 464 mlx4_free_icm(dev, table->icm[i], table->coherent);
463 } 465 }
464 466
465 kfree(table->icm); 467 kvfree(table->icm);
466} 468}
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 2edcce98ab2d..65482f004e50 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -172,7 +172,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
172 list_add_tail(&dev_ctx->list, &priv->ctx_list); 172 list_add_tail(&dev_ctx->list, &priv->ctx_list);
173 spin_unlock_irqrestore(&priv->ctx_lock, flags); 173 spin_unlock_irqrestore(&priv->ctx_lock, flags);
174 174
175 mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n", 175 mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
176 dev_ctx->intf->protocol, enable ? 176 dev_ctx->intf->protocol, enable ?
177 "enabled" : "disabled"); 177 "enabled" : "disabled");
178 } 178 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 3aaf4bad6c5a..427e7a31862c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
393 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; 393 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
394 struct mlx4_qp *qp; 394 struct mlx4_qp *qp;
395 395
396 spin_lock(&qp_table->lock); 396 spin_lock_irq(&qp_table->lock);
397 397
398 qp = __mlx4_qp_lookup(dev, qpn); 398 qp = __mlx4_qp_lookup(dev, qpn);
399 399
400 spin_unlock(&qp_table->lock); 400 spin_unlock_irq(&qp_table->lock);
401 return qp; 401 return qp;
402} 402}
403 403
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 176645762e49..1ff0b0e93804 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -615,6 +615,45 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
615 return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); 615 return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
616} 616}
617 617
618static __be32 mlx5e_get_fcs(struct sk_buff *skb)
619{
620 int last_frag_sz, bytes_in_prev, nr_frags;
621 u8 *fcs_p1, *fcs_p2;
622 skb_frag_t *last_frag;
623 __be32 fcs_bytes;
624
625 if (!skb_is_nonlinear(skb))
626 return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
627
628 nr_frags = skb_shinfo(skb)->nr_frags;
629 last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
630 last_frag_sz = skb_frag_size(last_frag);
631
632 /* If all FCS data is in last frag */
633 if (last_frag_sz >= ETH_FCS_LEN)
634 return *(__be32 *)(skb_frag_address(last_frag) +
635 last_frag_sz - ETH_FCS_LEN);
636
637 fcs_p2 = (u8 *)skb_frag_address(last_frag);
638 bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
639
640 /* Find where the other part of the FCS is - Linear or another frag */
641 if (nr_frags == 1) {
642 fcs_p1 = skb_tail_pointer(skb);
643 } else {
644 skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
645
646 fcs_p1 = skb_frag_address(prev_frag) +
647 skb_frag_size(prev_frag);
648 }
649 fcs_p1 -= bytes_in_prev;
650
651 memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
652 memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
653
654 return fcs_bytes;
655}
656
618static inline void mlx5e_handle_csum(struct net_device *netdev, 657static inline void mlx5e_handle_csum(struct net_device *netdev,
619 struct mlx5_cqe64 *cqe, 658 struct mlx5_cqe64 *cqe,
620 struct mlx5e_rq *rq, 659 struct mlx5e_rq *rq,
@@ -643,6 +682,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
643 skb->csum = csum_partial(skb->data + ETH_HLEN, 682 skb->csum = csum_partial(skb->data + ETH_HLEN,
644 network_depth - ETH_HLEN, 683 network_depth - ETH_HLEN,
645 skb->csum); 684 skb->csum);
685 if (unlikely(netdev->features & NETIF_F_RXFCS))
686 skb->csum = csum_add(skb->csum,
687 (__force __wsum)mlx5e_get_fcs(skb));
646 rq->stats.csum_complete++; 688 rq->stats.csum_complete++;
647 return; 689 return;
648 } 690 }
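mlx5e_get_fcs() above handles the case where the 4-byte FCS straddles the last two buffers by copying the two pieces into a scratch word. Here is a minimal userspace sketch of that reassembly step, assuming two plain byte buffers instead of skb fragments; names and buffer layout are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FCS_LEN 4

/*
 * Reassemble the trailing 4-byte FCS when it straddles two buffers:
 * 'prev' holds the earlier bytes, 'last' holds last_len (< 4) bytes.
 */
static uint32_t get_split_fcs(const uint8_t *prev, size_t prev_len,
			      const uint8_t *last, size_t last_len)
{
	uint32_t fcs;
	size_t bytes_in_prev = FCS_LEN - last_len;

	memcpy(&fcs, prev + prev_len - bytes_in_prev, bytes_in_prev);
	memcpy((uint8_t *)&fcs + bytes_in_prev, last, last_len);
	return fcs;
}

int main(void)
{
	/* Frame ...aa bb | cc dd : the FCS bytes aa bb cc dd are split 2/2. */
	uint8_t prev[] = { 0x11, 0x22, 0xaa, 0xbb };
	uint8_t last[] = { 0xcc, 0xdd };
	uint32_t fcs = get_split_fcs(prev, sizeof(prev), last, sizeof(last));

	printf("fcs bytes: %02x %02x %02x %02x\n",
	       ((uint8_t *)&fcs)[0], ((uint8_t *)&fcs)[1],
	       ((uint8_t *)&fcs)[2], ((uint8_t *)&fcs)[3]);
	return 0;
}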
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 0f5da499a223..fad8c2e3804e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -237,19 +237,17 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
237 context->buf.sg[0].data = &context->command; 237 context->buf.sg[0].data = &context->command;
238 238
239 spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); 239 spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
240 list_add_tail(&context->list, &fdev->ipsec->pending_cmds); 240 res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
241 if (!res)
242 list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
241 spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); 243 spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
242 244
243 res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
244 if (res) { 245 if (res) {
245 mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n", 246 mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
246 res);
247 spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
248 list_del(&context->list);
249 spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
250 kfree(context); 247 kfree(context);
251 return ERR_PTR(res); 248 return ERR_PTR(res);
252 } 249 }
250
253 /* Context will be freed by wait func after completion */ 251 /* Context will be freed by wait func after completion */
254 return context; 252 return context;
255} 253}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 00f41c145d4d..820b226d6ff8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -77,7 +77,7 @@
77#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET 77#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
78 78
79/* ILT entry structure */ 79/* ILT entry structure */
80#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL 80#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12)
81#define ILT_ENTRY_PHY_ADDR_SHIFT 0 81#define ILT_ENTRY_PHY_ADDR_SHIFT 0
82#define ILT_ENTRY_VALID_MASK 0x1ULL 82#define ILT_ENTRY_VALID_MASK 0x1ULL
83#define ILT_ENTRY_VALID_SHIFT 52 83#define ILT_ENTRY_VALID_SHIFT 52
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index 6838129839ca..e757b09f1889 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev)
61 return rc; 61 return rc;
62 62
63 /* make rcal=100, since rdb default is 000 */ 63 /* make rcal=100, since rdb default is 000 */
64 rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10); 64 rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10);
65 if (rc < 0) 65 if (rc < 0)
66 return rc; 66 return rc;
67 67
68 /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */ 68 /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
69 rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10); 69 rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10);
70 if (rc < 0) 70 if (rc < 0)
71 return rc; 71 return rc;
72 72
73 /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */ 73 /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
74 rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00); 74 rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00);
75 75
76 return 0; 76 return 0;
77} 77}
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 5ad130c3da43..d5e0833d69b9 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
56 /* The register must be written to both the Shadow Register Select and 56 /* The register must be written to both the Shadow Register Select and
57 * the Shadow Read Register Selector 57 * the Shadow Read Register Selector
58 */ 58 */
59 phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | 59 phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK |
60 regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT); 60 regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
61 return phy_read(phydev, MII_BCM54XX_AUX_CTL); 61 return phy_read(phydev, MII_BCM54XX_AUX_CTL);
62} 62}
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 7c73808cbbde..81cceaa412fe 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -14,11 +14,18 @@
14#ifndef _LINUX_BCM_PHY_LIB_H 14#ifndef _LINUX_BCM_PHY_LIB_H
15#define _LINUX_BCM_PHY_LIB_H 15#define _LINUX_BCM_PHY_LIB_H
16 16
17#include <linux/brcmphy.h>
17#include <linux/phy.h> 18#include <linux/phy.h>
18 19
19int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val); 20int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
20int bcm_phy_read_exp(struct phy_device *phydev, u16 reg); 21int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
21 22
23static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
24 u16 reg, u16 val)
25{
26 return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
27}
28
22int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val); 29int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
23int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum); 30int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);
24 31
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 29b1c88b55cc..01d2ff2f6241 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv {
65static void r_rc_cal_reset(struct phy_device *phydev) 65static void r_rc_cal_reset(struct phy_device *phydev)
66{ 66{
67 /* Reset R_CAL/RC_CAL Engine */ 67 /* Reset R_CAL/RC_CAL Engine */
68 bcm_phy_write_exp(phydev, 0x00b0, 0x0010); 68 bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
69 69
70 /* Disable Reset R_AL/RC_CAL Engine */ 70 /* Disable Reset R_AL/RC_CAL Engine */
71 bcm_phy_write_exp(phydev, 0x00b0, 0x0000); 71 bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
72} 72}
73 73
74static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev) 74static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index dc7c7ec43202..02ad03a2fab7 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -605,30 +605,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
605 605
606 if (cmd == PPPIOCDETACH) { 606 if (cmd == PPPIOCDETACH) {
607 /* 607 /*
608 * We have to be careful here... if the file descriptor 608 * PPPIOCDETACH is no longer supported as it was heavily broken,
609 * has been dup'd, we could have another process in the 609 * and is only known to have been used by pppd older than
610 * middle of a poll using the same file *, so we had 610 * ppp-2.4.2 (released November 2003).
611 * better not free the interface data structures -
612 * instead we fail the ioctl. Even in this case, we
613 * shut down the interface if we are the owner of it.
614 * Actually, we should get rid of PPPIOCDETACH, userland
615 * (i.e. pppd) could achieve the same effect by closing
616 * this fd and reopening /dev/ppp.
617 */ 611 */
612 pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
613 current->comm, current->pid);
618 err = -EINVAL; 614 err = -EINVAL;
619 if (pf->kind == INTERFACE) {
620 ppp = PF_TO_PPP(pf);
621 rtnl_lock();
622 if (file == ppp->owner)
623 unregister_netdevice(ppp->dev);
624 rtnl_unlock();
625 }
626 if (atomic_long_read(&file->f_count) < 2) {
627 ppp_release(NULL, file);
628 err = 0;
629 } else
630 pr_warn("PPPIOCDETACH file->f_count=%ld\n",
631 atomic_long_read(&file->f_count));
632 goto out; 615 goto out;
633 } 616 }
634 617
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index d45ac37e1287..45d807796a18 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1423,6 +1423,13 @@ static void tun_net_init(struct net_device *dev)
1423 dev->max_mtu = MAX_MTU - dev->hard_header_len; 1423 dev->max_mtu = MAX_MTU - dev->hard_header_len;
1424} 1424}
1425 1425
1426static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
1427{
1428 struct sock *sk = tfile->socket.sk;
1429
1430 return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
1431}
1432
1426/* Character device part */ 1433/* Character device part */
1427 1434
1428/* Poll */ 1435/* Poll */
@@ -1445,10 +1452,14 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
1445 if (!ptr_ring_empty(&tfile->tx_ring)) 1452 if (!ptr_ring_empty(&tfile->tx_ring))
1446 mask |= EPOLLIN | EPOLLRDNORM; 1453 mask |= EPOLLIN | EPOLLRDNORM;
1447 1454
1448 if (tun->dev->flags & IFF_UP && 1455 /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
1449 (sock_writeable(sk) || 1456 * guarantee that EPOLLOUT is raised either here or in
1450 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1457 * tun_sock_write_space(). Then the process can get a notification
1451 sock_writeable(sk)))) 1458 * after it writes to a down device and meets -EIO.
1459 */
1460 if (tun_sock_writeable(tun, tfile) ||
1461 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1462 tun_sock_writeable(tun, tfile)))
1452 mask |= EPOLLOUT | EPOLLWRNORM; 1463 mask |= EPOLLOUT | EPOLLWRNORM;
1453 1464
1454 if (tun->dev->reg_state != NETREG_REGISTERED) 1465 if (tun->dev->reg_state != NETREG_REGISTERED)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 770422e953f7..032e1ac10a30 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -707,6 +707,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
707 void *data; 707 void *data;
708 u32 act; 708 u32 act;
709 709
710 /* Transient failure which in theory could occur if
711 * in-flight packets from before XDP was enabled reach
712 * the receive path after XDP is loaded.
713 */
714 if (unlikely(hdr->hdr.gso_type))
715 goto err_xdp;
716
710 /* This happens when rx buffer size is underestimated 717 /* This happens when rx buffer size is underestimated
711 * or headroom is not enough because of the buffer 718 * or headroom is not enough because of the buffer
712 * was refilled before XDP is set. This should only 719 * was refilled before XDP is set. This should only
@@ -727,14 +734,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
727 xdp_page = page; 734 xdp_page = page;
728 } 735 }
729 736
730 /* Transient failure which in theory could occur if
731 * in-flight packets from before XDP was enabled reach
732 * the receive path after XDP is loaded. In practice I
733 * was not able to create this condition.
734 */
735 if (unlikely(hdr->hdr.gso_type))
736 goto err_xdp;
737
738 /* Allow consuming headroom but reserve enough space to push 737 /* Allow consuming headroom but reserve enough space to push
739 * the descriptor on if we get an XDP_TX return code. 738 * the descriptor on if we get an XDP_TX return code.
740 */ 739 */
@@ -775,7 +774,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
775 } 774 }
776 *xdp_xmit = true; 775 *xdp_xmit = true;
777 if (unlikely(xdp_page != page)) 776 if (unlikely(xdp_page != page))
778 goto err_xdp; 777 put_page(page);
779 rcu_read_unlock(); 778 rcu_read_unlock();
780 goto xdp_xmit; 779 goto xdp_xmit;
781 case XDP_REDIRECT: 780 case XDP_REDIRECT:
@@ -787,7 +786,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
787 } 786 }
788 *xdp_xmit = true; 787 *xdp_xmit = true;
789 if (unlikely(xdp_page != page)) 788 if (unlikely(xdp_page != page))
790 goto err_xdp; 789 put_page(page);
791 rcu_read_unlock(); 790 rcu_read_unlock();
792 goto xdp_xmit; 791 goto xdp_xmit;
793 default: 792 default:
@@ -875,7 +874,7 @@ err_xdp:
875 rcu_read_unlock(); 874 rcu_read_unlock();
876err_skb: 875err_skb:
877 put_page(page); 876 put_page(page);
878 while (--num_buf) { 877 while (num_buf-- > 1) {
879 buf = virtqueue_get_buf(rq->vq, &len); 878 buf = virtqueue_get_buf(rq->vq, &len);
880 if (unlikely(!buf)) { 879 if (unlikely(!buf)) {
881 pr_debug("%s: rx error: %d buffers missing\n", 880 pr_debug("%s: rx error: %d buffers missing\n",
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4a017a0d71ea..920c23e542a5 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3340,7 +3340,7 @@ out_err:
3340static int hwsim_dump_radio_nl(struct sk_buff *skb, 3340static int hwsim_dump_radio_nl(struct sk_buff *skb,
3341 struct netlink_callback *cb) 3341 struct netlink_callback *cb)
3342{ 3342{
3343 int last_idx = cb->args[0]; 3343 int last_idx = cb->args[0] - 1;
3344 struct mac80211_hwsim_data *data = NULL; 3344 struct mac80211_hwsim_data *data = NULL;
3345 int res = 0; 3345 int res = 0;
3346 void *hdr; 3346 void *hdr;
@@ -3368,7 +3368,7 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
3368 last_idx = data->idx; 3368 last_idx = data->idx;
3369 } 3369 }
3370 3370
3371 cb->args[0] = last_idx; 3371 cb->args[0] = last_idx + 1;
3372 3372
3373 /* list changed, but no new element sent, set interrupted flag */ 3373 /* list changed, but no new element sent, set interrupted flag */
3374 if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) { 3374 if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) {
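The hwsim dump fix works because cb->args[0] == 0 has to mean "start of dump", so the code stores last_idx + 1 and subtracts one on resume; otherwise a dump interrupted right after index 0 would restart from the top forever. A small sketch of that resume-cursor convention over a plain array (dump_some() and its batch size are invented for the example):

#include <stdio.h>

static int dump_some(const int *items, int n, long *cursor, int batch)
{
	int last_idx = (int)*cursor - 1;   /* -1 before anything was sent */
	int sent = 0;

	for (int i = last_idx + 1; i < n && sent < batch; i++) {
		printf("item[%d] = %d\n", i, items[i]);
		last_idx = i;
		sent++;
	}
	*cursor = last_idx + 1;            /* never store 0 after progress */
	return sent;
}

int main(void)
{
	const int items[] = { 5, 6, 7, 8, 9 };
	long cursor = 0;                   /* 0 means "start of dump" */

	while (dump_some(items, 5, &cursor, 2) > 0)
		;
	return 0;
}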
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 9371651d8017..c574dd210500 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -117,7 +117,7 @@ config SSB_SERIAL
117 117
118config SSB_DRIVER_PCICORE_POSSIBLE 118config SSB_DRIVER_PCICORE_POSSIBLE
119 bool 119 bool
120 depends on SSB_PCIHOST && SSB = y 120 depends on SSB_PCIHOST
121 default y 121 default y
122 122
123config SSB_DRIVER_PCICORE 123config SSB_DRIVER_PCICORE
@@ -131,7 +131,7 @@ config SSB_DRIVER_PCICORE
131 131
132config SSB_PCICORE_HOSTMODE 132config SSB_PCICORE_HOSTMODE
133 bool "Hostmode support for SSB PCI core" 133 bool "Hostmode support for SSB PCI core"
134 depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS 134 depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS && SSB = y
135 help 135 help
136 PCIcore hostmode operation (external PCI bus). 136 PCIcore hostmode operation (external PCI bus).
137 137
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index f3bd8e941224..f0be5f35ab28 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -981,6 +981,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
981{ 981{
982 int ret = 0; 982 int ret = 0;
983 983
984 mutex_lock(&dev->mutex);
984 vhost_dev_lock_vqs(dev); 985 vhost_dev_lock_vqs(dev);
985 switch (msg->type) { 986 switch (msg->type) {
986 case VHOST_IOTLB_UPDATE: 987 case VHOST_IOTLB_UPDATE:
@@ -1016,6 +1017,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
1016 } 1017 }
1017 1018
1018 vhost_dev_unlock_vqs(dev); 1019 vhost_dev_unlock_vqs(dev);
1020 mutex_unlock(&dev->mutex);
1021
1019 return ret; 1022 return ret;
1020} 1023}
1021ssize_t vhost_chr_write_iter(struct vhost_dev *dev, 1024ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 91a8889abf9b..ea8c551bcd7e 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -570,16 +570,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
570 current_page, vec_len, vec_start); 570 current_page, vec_len, vec_start);
571 571
572 len = bio_add_page(bio, page, vec_len, vec_start); 572 len = bio_add_page(bio, page, vec_len, vec_start);
573 if (len != vec_len) { 573 if (len != vec_len) break;
574 mlog(ML_ERROR, "Adding page[%d] to bio failed, "
575 "page %p, len %d, vec_len %u, vec_start %u, "
576 "bi_sector %llu\n", current_page, page, len,
577 vec_len, vec_start,
578 (unsigned long long)bio->bi_iter.bi_sector);
579 bio_put(bio);
580 bio = ERR_PTR(-EIO);
581 return bio;
582 }
583 574
584 cs += vec_len / (PAGE_SIZE/spp); 575 cs += vec_len / (PAGE_SIZE/spp);
585 vec_start = 0; 576 vec_start = 0;
diff --git a/fs/seq_file.c b/fs/seq_file.c
index c6c27f1f9c98..4cc090b50cc5 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -709,11 +709,6 @@ void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter,
709 if (m->count + width >= m->size) 709 if (m->count + width >= m->size)
710 goto overflow; 710 goto overflow;
711 711
712 if (num < 10) {
713 m->buf[m->count++] = num + '0';
714 return;
715 }
716
717 len = num_to_str(m->buf + m->count, m->size - m->count, num, width); 712 len = num_to_str(m->buf + m->count, m->size - m->count, num, width);
718 if (!len) 713 if (!len)
719 goto overflow; 714 goto overflow;
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 65cfc2f59db9..df36b1b08af0 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -142,7 +142,7 @@ struct bpf_verifier_state_list {
142struct bpf_insn_aux_data { 142struct bpf_insn_aux_data {
143 union { 143 union {
144 enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ 144 enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
145 struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ 145 unsigned long map_state; /* pointer/poison value for maps */
146 s32 call_imm; /* saved imm field of call insn */ 146 s32 call_imm; /* saved imm field of call insn */
147 }; 147 };
148 int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ 148 int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 1a4582b44d32..fc5ab85278d5 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -464,7 +464,7 @@ static inline struct page *
464__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) 464__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
465{ 465{
466 VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); 466 VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
467 VM_WARN_ON(!node_online(nid)); 467 VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
468 468
469 return __alloc_pages(gfp_mask, order, nid); 469 return __alloc_pages(gfp_mask, order, nid);
470} 470}
diff --git a/include/linux/node.h b/include/linux/node.h
index 41f171861dcc..6d336e38d155 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -32,9 +32,11 @@ extern struct node *node_devices[];
32typedef void (*node_registration_func_t)(struct node *); 32typedef void (*node_registration_func_t)(struct node *);
33 33
34#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) 34#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
35extern int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages); 35extern int link_mem_sections(int nid, unsigned long start_pfn,
36 unsigned long nr_pages, bool check_nid);
36#else 37#else
37static inline int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages) 38static inline int link_mem_sections(int nid, unsigned long start_pfn,
39 unsigned long nr_pages, bool check_nid)
38{ 40{
39 return 0; 41 return 0;
40} 42}
@@ -57,7 +59,7 @@ static inline int register_one_node(int nid)
57 if (error) 59 if (error)
58 return error; 60 return error;
59 /* link memory sections under this node */ 61 /* link memory sections under this node */
60 error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages); 62 error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages, true);
61 } 63 }
62 64
63 return error; 65 return error;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 28b996d63490..35498e613ff5 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -103,6 +103,8 @@ void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
103/* 103/*
104 * sctp/socket.c 104 * sctp/socket.c
105 */ 105 */
106int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
107 int addr_len, int flags);
106int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 108int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
107int sctp_inet_listen(struct socket *sock, int backlog); 109int sctp_inet_listen(struct socket *sock, int backlog);
108void sctp_write_space(struct sock *sk); 110void sctp_write_space(struct sock *sk);
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index bc01e06bc716..0be866c91f62 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -435,7 +435,9 @@ TRACE_EVENT(sched_pi_setprio,
435 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); 435 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
436 __entry->pid = tsk->pid; 436 __entry->pid = tsk->pid;
437 __entry->oldprio = tsk->prio; 437 __entry->oldprio = tsk->prio;
438 __entry->newprio = pi_task ? pi_task->prio : tsk->prio; 438 __entry->newprio = pi_task ?
439 min(tsk->normal_prio, pi_task->prio) :
440 tsk->normal_prio;
439 /* XXX SCHED_DEADLINE bits missing */ 441 /* XXX SCHED_DEADLINE bits missing */
440 ), 442 ),
441 443
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 9c3630146cec..271b93783d28 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2698,7 +2698,7 @@ enum nl80211_attrs {
2698#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS 2698#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
2699#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS 2699#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
2700 2700
2701#define NL80211_WIPHY_NAME_MAXLEN 128 2701#define NL80211_WIPHY_NAME_MAXLEN 64
2702 2702
2703#define NL80211_MAX_SUPP_RATES 32 2703#define NL80211_MAX_SUPP_RATES 32
2704#define NL80211_MAX_SUPP_HT_RATES 77 2704#define NL80211_MAX_SUPP_HT_RATES 77
diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h
index b19a9c249b15..784c2e3e572e 100644
--- a/include/uapi/linux/ppp-ioctl.h
+++ b/include/uapi/linux/ppp-ioctl.h
@@ -106,7 +106,7 @@ struct pppol2tp_ioc_stats {
106#define PPPIOCGIDLE _IOR('t', 63, struct ppp_idle) /* get idle time */ 106#define PPPIOCGIDLE _IOR('t', 63, struct ppp_idle) /* get idle time */
107#define PPPIOCNEWUNIT _IOWR('t', 62, int) /* create new ppp unit */ 107#define PPPIOCNEWUNIT _IOWR('t', 62, int) /* create new ppp unit */
108#define PPPIOCATTACH _IOW('t', 61, int) /* attach to ppp unit */ 108#define PPPIOCATTACH _IOW('t', 61, int) /* attach to ppp unit */
109#define PPPIOCDETACH _IOW('t', 60, int) /* detach from ppp unit/chan */ 109#define PPPIOCDETACH _IOW('t', 60, int) /* obsolete, do not use */
110#define PPPIOCSMRRU _IOW('t', 59, int) /* set multilink MRU */ 110#define PPPIOCSMRRU _IOW('t', 59, int) /* set multilink MRU */
111#define PPPIOCCONNECT _IOW('t', 58, int) /* connect channel to unit */ 111#define PPPIOCCONNECT _IOW('t', 58, int) /* connect channel to unit */
112#define PPPIOCDISCONN _IO('t', 57) /* disconnect channel */ 112#define PPPIOCDISCONN _IO('t', 57) /* disconnect channel */
diff --git a/init/main.c b/init/main.c
index fd37315835b4..3b4ada11ed52 100644
--- a/init/main.c
+++ b/init/main.c
@@ -91,6 +91,7 @@
91#include <linux/cache.h> 91#include <linux/cache.h>
92#include <linux/rodata_test.h> 92#include <linux/rodata_test.h>
93#include <linux/jump_label.h> 93#include <linux/jump_label.h>
94#include <linux/mem_encrypt.h>
94 95
95#include <asm/io.h> 96#include <asm/io.h>
96#include <asm/bugs.h> 97#include <asm/bugs.h>
diff --git a/ipc/shm.c b/ipc/shm.c
index 3cf48988d68c..d73269381ec7 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1363,14 +1363,17 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1363 1363
1364 if (addr) { 1364 if (addr) {
1365 if (addr & (shmlba - 1)) { 1365 if (addr & (shmlba - 1)) {
1366 /* 1366 if (shmflg & SHM_RND) {
1367 * Round down to the nearest multiple of shmlba. 1367 addr &= ~(shmlba - 1); /* round down */
1368 * For sane do_mmap_pgoff() parameters, avoid 1368
1369 * round downs that trigger nil-page and MAP_FIXED. 1369 /*
1370 */ 1370 * Ensure that the round-down is non-nil
1371 if ((shmflg & SHM_RND) && addr >= shmlba) 1371 * when remapping. This can happen for
1372 addr &= ~(shmlba - 1); 1372 * cases when addr < shmlba.
1373 else 1373 */
1374 if (!addr && (shmflg & SHM_REMAP))
1375 goto out;
1376 } else
1374#ifndef __ARCH_FORCE_SHMLBA 1377#ifndef __ARCH_FORCE_SHMLBA
1375 if (addr & ~PAGE_MASK) 1378 if (addr & ~PAGE_MASK)
1376#endif 1379#endif
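The shmat change rounds the attach address down to a multiple of SHMLBA and then refuses a round-down to zero for SHM_REMAP. A short sketch of just that arithmetic, with an assumed 16 KiB SHMLBA:

#include <stdio.h>

/* Round an attach address down to a multiple of SHMLBA (a power of two). */
static unsigned long round_down_shmlba(unsigned long addr, unsigned long shmlba)
{
	return addr & ~(shmlba - 1);
}

int main(void)
{
	unsigned long shmlba = 0x4000;      /* assumed 16 KiB for the example */
	unsigned long addr   = 0x123;       /* below shmlba */
	unsigned long rounded = round_down_shmlba(addr, shmlba);

	/* A nil result means the caller asked to map at page zero: reject it. */
	if (!rounded)
		printf("addr 0x%lx rounds down to 0 -> refuse SHM_REMAP\n", addr);
	else
		printf("attach at 0x%lx\n", rounded);
	return 0;
}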
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2ce967a63ede..1904e814f282 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -156,7 +156,29 @@ struct bpf_verifier_stack_elem {
156#define BPF_COMPLEXITY_LIMIT_INSNS 131072 156#define BPF_COMPLEXITY_LIMIT_INSNS 131072
157#define BPF_COMPLEXITY_LIMIT_STACK 1024 157#define BPF_COMPLEXITY_LIMIT_STACK 1024
158 158
159#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) 159#define BPF_MAP_PTR_UNPRIV 1UL
160#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
161 POISON_POINTER_DELTA))
162#define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
163
164static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
165{
166 return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
167}
168
169static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
170{
171 return aux->map_state & BPF_MAP_PTR_UNPRIV;
172}
173
174static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
175 const struct bpf_map *map, bool unpriv)
176{
177 BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
178 unpriv |= bpf_map_ptr_unpriv(aux);
179 aux->map_state = (unsigned long)map |
180 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
181}
160 182
161struct bpf_call_arg_meta { 183struct bpf_call_arg_meta {
162 struct bpf_map *map_ptr; 184 struct bpf_map *map_ptr;
@@ -2358,6 +2380,29 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
2358 return 0; 2380 return 0;
2359} 2381}
2360 2382
2383static int
2384record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
2385 int func_id, int insn_idx)
2386{
2387 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
2388
2389 if (func_id != BPF_FUNC_tail_call &&
2390 func_id != BPF_FUNC_map_lookup_elem)
2391 return 0;
2392 if (meta->map_ptr == NULL) {
2393 verbose(env, "kernel subsystem misconfigured verifier\n");
2394 return -EINVAL;
2395 }
2396
2397 if (!BPF_MAP_PTR(aux->map_state))
2398 bpf_map_ptr_store(aux, meta->map_ptr,
2399 meta->map_ptr->unpriv_array);
2400 else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
2401 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
2402 meta->map_ptr->unpriv_array);
2403 return 0;
2404}
2405
2361static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) 2406static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
2362{ 2407{
2363 const struct bpf_func_proto *fn = NULL; 2408 const struct bpf_func_proto *fn = NULL;
@@ -2412,13 +2457,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
2412 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); 2457 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
2413 if (err) 2458 if (err)
2414 return err; 2459 return err;
2415 if (func_id == BPF_FUNC_tail_call) {
2416 if (meta.map_ptr == NULL) {
2417 verbose(env, "verifier bug\n");
2418 return -EINVAL;
2419 }
2420 env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
2421 }
2422 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); 2460 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
2423 if (err) 2461 if (err)
2424 return err; 2462 return err;
@@ -2429,6 +2467,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
2429 if (err) 2467 if (err)
2430 return err; 2468 return err;
2431 2469
2470 err = record_func_map(env, &meta, func_id, insn_idx);
2471 if (err)
2472 return err;
2473
2432 /* Mark slots with STACK_MISC in case of raw mode, stack offset 2474 /* Mark slots with STACK_MISC in case of raw mode, stack offset
2433 * is inferred from register state. 2475 * is inferred from register state.
2434 */ 2476 */
@@ -2453,8 +2495,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
2453 } else if (fn->ret_type == RET_VOID) { 2495 } else if (fn->ret_type == RET_VOID) {
2454 regs[BPF_REG_0].type = NOT_INIT; 2496 regs[BPF_REG_0].type = NOT_INIT;
2455 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { 2497 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
2456 struct bpf_insn_aux_data *insn_aux;
2457
2458 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; 2498 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
2459 /* There is no offset yet applied, variable or fixed */ 2499 /* There is no offset yet applied, variable or fixed */
2460 mark_reg_known_zero(env, regs, BPF_REG_0); 2500 mark_reg_known_zero(env, regs, BPF_REG_0);
@@ -2470,11 +2510,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
2470 } 2510 }
2471 regs[BPF_REG_0].map_ptr = meta.map_ptr; 2511 regs[BPF_REG_0].map_ptr = meta.map_ptr;
2472 regs[BPF_REG_0].id = ++env->id_gen; 2512 regs[BPF_REG_0].id = ++env->id_gen;
2473 insn_aux = &env->insn_aux_data[insn_idx];
2474 if (!insn_aux->map_ptr)
2475 insn_aux->map_ptr = meta.map_ptr;
2476 else if (insn_aux->map_ptr != meta.map_ptr)
2477 insn_aux->map_ptr = BPF_MAP_PTR_POISON;
2478 } else { 2513 } else {
2479 verbose(env, "unknown return type %d of func %s#%d\n", 2514 verbose(env, "unknown return type %d of func %s#%d\n",
2480 fn->ret_type, func_id_name(func_id), func_id); 2515 fn->ret_type, func_id_name(func_id), func_id);
@@ -5470,6 +5505,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
5470 struct bpf_insn *insn = prog->insnsi; 5505 struct bpf_insn *insn = prog->insnsi;
5471 const struct bpf_func_proto *fn; 5506 const struct bpf_func_proto *fn;
5472 const int insn_cnt = prog->len; 5507 const int insn_cnt = prog->len;
5508 struct bpf_insn_aux_data *aux;
5473 struct bpf_insn insn_buf[16]; 5509 struct bpf_insn insn_buf[16];
5474 struct bpf_prog *new_prog; 5510 struct bpf_prog *new_prog;
5475 struct bpf_map *map_ptr; 5511 struct bpf_map *map_ptr;
@@ -5544,19 +5580,22 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
5544 insn->imm = 0; 5580 insn->imm = 0;
5545 insn->code = BPF_JMP | BPF_TAIL_CALL; 5581 insn->code = BPF_JMP | BPF_TAIL_CALL;
5546 5582
5583 aux = &env->insn_aux_data[i + delta];
5584 if (!bpf_map_ptr_unpriv(aux))
5585 continue;
5586
5547 /* instead of changing every JIT dealing with tail_call 5587 /* instead of changing every JIT dealing with tail_call
5548 * emit two extra insns: 5588 * emit two extra insns:
5549 * if (index >= max_entries) goto out; 5589 * if (index >= max_entries) goto out;
5550 * index &= array->index_mask; 5590 * index &= array->index_mask;
5551 * to avoid out-of-bounds cpu speculation 5591 * to avoid out-of-bounds cpu speculation
5552 */ 5592 */
5553 map_ptr = env->insn_aux_data[i + delta].map_ptr; 5593 if (bpf_map_ptr_poisoned(aux)) {
5554 if (map_ptr == BPF_MAP_PTR_POISON) {
5555 verbose(env, "tail_call abusing map_ptr\n"); 5594 verbose(env, "tail_call abusing map_ptr\n");
5556 return -EINVAL; 5595 return -EINVAL;
5557 } 5596 }
5558 if (!map_ptr->unpriv_array) 5597
5559 continue; 5598 map_ptr = BPF_MAP_PTR(aux->map_state);
5560 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, 5599 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
5561 map_ptr->max_entries, 2); 5600 map_ptr->max_entries, 2);
5562 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, 5601 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
@@ -5580,9 +5619,12 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
5580 */ 5619 */
5581 if (prog->jit_requested && BITS_PER_LONG == 64 && 5620 if (prog->jit_requested && BITS_PER_LONG == 64 &&
5582 insn->imm == BPF_FUNC_map_lookup_elem) { 5621 insn->imm == BPF_FUNC_map_lookup_elem) {
5583 map_ptr = env->insn_aux_data[i + delta].map_ptr; 5622 aux = &env->insn_aux_data[i + delta];
5584 if (map_ptr == BPF_MAP_PTR_POISON || 5623 if (bpf_map_ptr_poisoned(aux))
5585 !map_ptr->ops->map_gen_lookup) 5624 goto patch_call_imm;
5625
5626 map_ptr = BPF_MAP_PTR(aux->map_state);
5627 if (!map_ptr->ops->map_gen_lookup)
5586 goto patch_call_imm; 5628 goto patch_call_imm;
5587 5629
5588 cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf); 5630 cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
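The verifier now keeps the map pointer and an "unpriv" flag packed into one unsigned long (map_state), using the low bit of the aligned pointer as the flag, with BPF_MAP_PTR_POISON reserved as a sentinel. The sketch below shows only the generic pointer-tagging trick in userspace C; pack(), unpack_ptr() and unpack_unpriv() are illustrative names, not the verifier helpers, and the poison handling is omitted.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTR_UNPRIV ((uintptr_t)1)   /* low bit of an aligned pointer is free */

struct map { int max_entries; };

static uintptr_t pack(const struct map *m, bool unpriv)
{
	/* struct map objects are at least 4-byte aligned, so bit 0 is 0. */
	assert(((uintptr_t)m & PTR_UNPRIV) == 0);
	return (uintptr_t)m | (unpriv ? PTR_UNPRIV : 0);
}

static const struct map *unpack_ptr(uintptr_t state)
{
	return (const struct map *)(state & ~PTR_UNPRIV);
}

static bool unpack_unpriv(uintptr_t state)
{
	return state & PTR_UNPRIV;
}

int main(void)
{
	static const struct map m = { .max_entries = 8 };
	uintptr_t state = pack(&m, true);

	printf("unpriv=%d max_entries=%d\n",
	       unpack_unpriv(state), unpack_ptr(state)->max_entries);
	return 0;
}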
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 2017a39ab490..481951bf091d 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -193,7 +193,7 @@ EXPORT_SYMBOL_GPL(kthread_parkme);
193 193
194void kthread_park_complete(struct task_struct *k) 194void kthread_park_complete(struct task_struct *k)
195{ 195{
196 complete(&to_kthread(k)->parked); 196 complete_all(&to_kthread(k)->parked);
197} 197}
198 198
199static int kthread(void *_create) 199static int kthread(void *_create)
@@ -459,6 +459,7 @@ void kthread_unpark(struct task_struct *k)
459 if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) 459 if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
460 __kthread_bind(k, kthread->cpu, TASK_PARKED); 460 __kthread_bind(k, kthread->cpu, TASK_PARKED);
461 461
462 reinit_completion(&kthread->parked);
462 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 463 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
463 wake_up_state(k, TASK_PARKED); 464 wake_up_state(k, TASK_PARKED);
464} 465}
@@ -483,9 +484,6 @@ int kthread_park(struct task_struct *k)
483 if (WARN_ON(k->flags & PF_EXITING)) 484 if (WARN_ON(k->flags & PF_EXITING))
484 return -ENOSYS; 485 return -ENOSYS;
485 486
486 if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
487 return -EBUSY;
488
489 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 487 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
490 if (k != current) { 488 if (k != current) {
491 wake_up_process(k); 489 wake_up_process(k);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 64cc564f5255..61a1125c1ae4 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1708,7 +1708,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
1708 rcu_read_unlock(); 1708 rcu_read_unlock();
1709 1709
1710 if (rq && sched_debug_enabled) { 1710 if (rq && sched_debug_enabled) {
1711 pr_info("span: %*pbl (max cpu_capacity = %lu)\n", 1711 pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
1712 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); 1712 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
1713 } 1713 }
1714 1714
diff --git a/kernel/sys.c b/kernel/sys.c
index b0eee418ee0d..d1b2b8d934bb 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -71,6 +71,9 @@
71#include <asm/io.h> 71#include <asm/io.h>
72#include <asm/unistd.h> 72#include <asm/unistd.h>
73 73
74/* Hardening for Spectre-v1 */
75#include <linux/nospec.h>
76
74#include "uid16.h" 77#include "uid16.h"
75 78
76#ifndef SET_UNALIGN_CTL 79#ifndef SET_UNALIGN_CTL
@@ -1453,6 +1456,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1453 if (resource >= RLIM_NLIMITS) 1456 if (resource >= RLIM_NLIMITS)
1454 return -EINVAL; 1457 return -EINVAL;
1455 1458
1459 resource = array_index_nospec(resource, RLIM_NLIMITS);
1456 task_lock(current->group_leader); 1460 task_lock(current->group_leader);
1457 x = current->signal->rlim[resource]; 1461 x = current->signal->rlim[resource];
1458 task_unlock(current->group_leader); 1462 task_unlock(current->group_leader);
@@ -1472,6 +1476,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1472 if (resource >= RLIM_NLIMITS) 1476 if (resource >= RLIM_NLIMITS)
1473 return -EINVAL; 1477 return -EINVAL;
1474 1478
1479 resource = array_index_nospec(resource, RLIM_NLIMITS);
1475 task_lock(current->group_leader); 1480 task_lock(current->group_leader);
1476 r = current->signal->rlim[resource]; 1481 r = current->signal->rlim[resource];
1477 task_unlock(current->group_leader); 1482 task_unlock(current->group_leader);
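The old_getrlimit changes clamp the user-supplied index after the bounds check so a mispredicted branch cannot speculatively index past rlim[]. The sketch below only shows where the clamp sits; unlike the real array_index_nospec(), which builds its mask with branch-free, architecture-specific code, this portable stand-in uses an ordinary conditional and is not a constant-time implementation.

#include <stdio.h>

/* Returns idx when idx < size and 0 otherwise (simplified, not branch-free). */
static unsigned long index_nospec(unsigned long idx, unsigned long size)
{
	unsigned long mask = (idx < size) ? ~0UL : 0UL;

	return idx & mask;
}

int main(void)
{
	static const int rlim[4] = { 10, 20, 30, 40 };
	unsigned long idx = 2;              /* imagine this came from userspace */

	if (idx >= 4)
		return 1;                   /* architectural bounds check */

	idx = index_nospec(idx, 4);         /* clamp again before the array load */
	printf("rlim[%lu] = %d\n", idx, rlim[idx]);
	return 0;
}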
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 43e0cbedc3a0..a9e41aed6de4 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2034,10 +2034,12 @@ void *radix_tree_delete_item(struct radix_tree_root *root,
2034 unsigned long index, void *item) 2034 unsigned long index, void *item)
2035{ 2035{
2036 struct radix_tree_node *node = NULL; 2036 struct radix_tree_node *node = NULL;
2037 void __rcu **slot; 2037 void __rcu **slot = NULL;
2038 void *entry; 2038 void *entry;
2039 2039
2040 entry = __radix_tree_lookup(root, index, &node, &slot); 2040 entry = __radix_tree_lookup(root, index, &node, &slot);
2041 if (!slot)
2042 return NULL;
2041 if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, 2043 if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
2042 get_slot_offset(node, slot)))) 2044 get_slot_offset(node, slot))))
2043 return NULL; 2045 return NULL;
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index bc0e68f7dc75..f185455b3406 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -792,6 +792,40 @@ DEFINE_ASAN_SET_SHADOW(f5);
792DEFINE_ASAN_SET_SHADOW(f8); 792DEFINE_ASAN_SET_SHADOW(f8);
793 793
794#ifdef CONFIG_MEMORY_HOTPLUG 794#ifdef CONFIG_MEMORY_HOTPLUG
795static bool shadow_mapped(unsigned long addr)
796{
797 pgd_t *pgd = pgd_offset_k(addr);
798 p4d_t *p4d;
799 pud_t *pud;
800 pmd_t *pmd;
801 pte_t *pte;
802
803 if (pgd_none(*pgd))
804 return false;
805 p4d = p4d_offset(pgd, addr);
806 if (p4d_none(*p4d))
807 return false;
808 pud = pud_offset(p4d, addr);
809 if (pud_none(*pud))
810 return false;
811
812 /*
813 * We can't use pud_large() or pud_huge(): the former is
814 * arch-specific and the latter depends on HUGETLB_PAGE. So let's abuse
815 * pud_bad(); if the pud is bad, it's bad because it's huge.
816 */
817 if (pud_bad(*pud))
818 return true;
819 pmd = pmd_offset(pud, addr);
820 if (pmd_none(*pmd))
821 return false;
822
823 if (pmd_bad(*pmd))
824 return true;
825 pte = pte_offset_kernel(pmd, addr);
826 return !pte_none(*pte);
827}
828
795static int __meminit kasan_mem_notifier(struct notifier_block *nb, 829static int __meminit kasan_mem_notifier(struct notifier_block *nb,
796 unsigned long action, void *data) 830 unsigned long action, void *data)
797{ 831{
@@ -813,6 +847,14 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
813 case MEM_GOING_ONLINE: { 847 case MEM_GOING_ONLINE: {
814 void *ret; 848 void *ret;
815 849
850 /*
851 * If the shadow is already mapped then it must have been mapped
852 * during boot. This can happen when we are onlining previously
853 * offlined memory.
854 */
855 if (shadow_mapped(shadow_start))
856 return NOTIFY_OK;
857
816 ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start, 858 ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
817 shadow_end, GFP_KERNEL, 859 shadow_end, GFP_KERNEL,
818 PAGE_KERNEL, VM_NO_GUARD, 860 PAGE_KERNEL, VM_NO_GUARD,
@@ -824,8 +866,26 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
824 kmemleak_ignore(ret); 866 kmemleak_ignore(ret);
825 return NOTIFY_OK; 867 return NOTIFY_OK;
826 } 868 }
827 case MEM_OFFLINE: 869 case MEM_CANCEL_ONLINE:
828 vfree((void *)shadow_start); 870 case MEM_OFFLINE: {
871 struct vm_struct *vm;
872
873 /*
874 * shadow_start was either mapped during boot by kasan_init()
875 * or during memory online by __vmalloc_node_range().
876 * In the latter case we can use vfree() to free shadow.
877 * A non-NULL result from find_vm_area() tells us that it
878 * was indeed the latter case.
879 *
880 * Currently it's not possible to free shadow mapped
881 * during boot by kasan_init(). It's because the code
882 * to do that hasn't been written yet. So we'll just
883 * leak the memory.
884 */
885 vm = find_vm_area((void *)shadow_start);
886 if (vm)
887 vfree((void *)shadow_start);
888 }
829 } 889 }
830 890
831 return NOTIFY_OK; 891 return NOTIFY_OK;
@@ -838,5 +898,5 @@ static int __init kasan_memhotplug_init(void)
838 return 0; 898 return 0;
839} 899}
840 900
841module_init(kasan_memhotplug_init); 901core_initcall(kasan_memhotplug_init);
842#endif 902#endif
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index f74826cdceea..25982467800b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1158,7 +1158,7 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
1158 * nodes have to go through register_node. 1158 * nodes have to go through register_node.
1159 * TODO clean up this mess. 1159 * TODO clean up this mess.
1160 */ 1160 */
1161 ret = link_mem_sections(nid, start_pfn, nr_pages); 1161 ret = link_mem_sections(nid, start_pfn, nr_pages, false);
1162register_fail: 1162register_fail:
1163 /* 1163 /*
1164 * If sysfs file of new node can't create, cpu on the node 1164 * If sysfs file of new node can't create, cpu on the node
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 511a7124d7f9..22320ea27489 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7598,11 +7598,12 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7598 unsigned long pfn, iter, found; 7598 unsigned long pfn, iter, found;
7599 7599
7600 /* 7600 /*
7601 * For avoiding noise data, lru_add_drain_all() should be called 7601 * TODO we could make this much more efficient by not checking every
7602 * If ZONE_MOVABLE, the zone never contains unmovable pages 7602 * page in the range if we know all of them are in MOVABLE_ZONE and
7603 * that the movable zone guarantees that pages are migratable but
7604 * the latter is not the case right now, unfortunately. E.g. movablecore
7605 * can still lead to having bootmem allocations in zone_movable.
7603 */ 7606 */
7604 if (zone_idx(zone) == ZONE_MOVABLE)
7605 return false;
7606 7607
7607 /* 7608 /*
7608 * CMA allocations (alloc_contig_range) really need to mark isolate 7609 * CMA allocations (alloc_contig_range) really need to mark isolate
@@ -7623,7 +7624,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7623 page = pfn_to_page(check); 7624 page = pfn_to_page(check);
7624 7625
7625 if (PageReserved(page)) 7626 if (PageReserved(page))
7626 return true; 7627 goto unmovable;
7627 7628
7628 /* 7629 /*
7629 * Hugepages are not in LRU lists, but they're movable. 7630 * Hugepages are not in LRU lists, but they're movable.
@@ -7673,9 +7674,12 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7673 * page at boot. 7674 * page at boot.
7674 */ 7675 */
7675 if (found > count) 7676 if (found > count)
7676 return true; 7677 goto unmovable;
7677 } 7678 }
7678 return false; 7679 return false;
7680unmovable:
7681 WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
7682 return true;
7679} 7683}
7680 7684
7681bool is_pageblock_removable_nolock(struct page *page) 7685bool is_pageblock_removable_nolock(struct page *page)
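
The has_unmovable_pages() hunks replace the early ZONE_MOVABLE bail-out with a single "unmovable" exit that warns when the assumption is violated. Below is a simplified sketch of that single-exit shape; range_has_unmovable_sketch() is illustrative and checks far fewer page states than the real function:

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bug.h>

/*
 * Sketch: funnel every "found something unmovable" bail-out through one
 * label, so a broken assumption (an unmovable page sitting in
 * ZONE_MOVABLE, e.g. a bootmem allocation pulled in by movablecore) is
 * reported once via WARN_ON_ONCE() instead of being silently trusted.
 */
static bool range_has_unmovable_sketch(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		struct page *page = pfn_to_page(pfn);

		if (PageReserved(page))
			goto unmovable;
	}
	return false;

unmovable:
	WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
	return true;
}
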
diff --git a/mm/swapfile.c b/mm/swapfile.c
index cc2cf04d9018..78a015fcec3b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3112,6 +3112,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3112 unsigned long *frontswap_map = NULL; 3112 unsigned long *frontswap_map = NULL;
3113 struct page *page = NULL; 3113 struct page *page = NULL;
3114 struct inode *inode = NULL; 3114 struct inode *inode = NULL;
3115 bool inced_nr_rotate_swap = false;
3115 3116
3116 if (swap_flags & ~SWAP_FLAGS_VALID) 3117 if (swap_flags & ~SWAP_FLAGS_VALID)
3117 return -EINVAL; 3118 return -EINVAL;
@@ -3215,8 +3216,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3215 cluster = per_cpu_ptr(p->percpu_cluster, cpu); 3216 cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3216 cluster_set_null(&cluster->index); 3217 cluster_set_null(&cluster->index);
3217 } 3218 }
3218 } else 3219 } else {
3219 atomic_inc(&nr_rotate_swap); 3220 atomic_inc(&nr_rotate_swap);
3221 inced_nr_rotate_swap = true;
3222 }
3220 3223
3221 error = swap_cgroup_swapon(p->type, maxpages); 3224 error = swap_cgroup_swapon(p->type, maxpages);
3222 if (error) 3225 if (error)
@@ -3307,6 +3310,8 @@ bad_swap:
3307 vfree(swap_map); 3310 vfree(swap_map);
3308 kvfree(cluster_info); 3311 kvfree(cluster_info);
3309 kvfree(frontswap_map); 3312 kvfree(frontswap_map);
3313 if (inced_nr_rotate_swap)
3314 atomic_dec(&nr_rotate_swap);
3310 if (swap_file) { 3315 if (swap_file) {
3311 if (inode && S_ISREG(inode->i_mode)) { 3316 if (inode && S_ISREG(inode->i_mode)) {
3312 inode_unlock(inode); 3317 inode_unlock(inode);
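
The swapon() change records whether nr_rotate_swap was bumped so the shared bad_swap unwind path can undo exactly that increment. A compact sketch of the same unwind pattern, with setup_swap_like() standing in for the real syscall and the failing step faked for brevity:

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/errno.h>

static atomic_t nr_rotate_swap = ATOMIC_INIT(0);

/*
 * Sketch: remember whether the global counter was incremented so the
 * common error path decrements it exactly once, and only when it was
 * actually taken.
 */
static int setup_swap_like(bool nonrot, bool fail)
{
	bool inced_nr_rotate_swap = false;
	int error;

	if (!nonrot) {
		atomic_inc(&nr_rotate_swap);
		inced_nr_rotate_swap = true;
	}

	if (fail) {			/* stands in for any later setup failure */
		error = -EINVAL;
		goto bad_swap;
	}

	return 0;

bad_swap:
	if (inced_nr_rotate_swap)
		atomic_dec(&nr_rotate_swap);
	return error;
}
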
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index a11d3d89f012..a35f597e8c8b 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1536,7 +1536,7 @@ out:
1536 1536
1537 if (!ret && primary_if) 1537 if (!ret && primary_if)
1538 *primary_if = hard_iface; 1538 *primary_if = hard_iface;
1539 else 1539 else if (hard_iface)
1540 batadv_hardif_put(hard_iface); 1540 batadv_hardif_put(hard_iface);
1541 1541
1542 return ret; 1542 return ret;
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 0225616d5771..3986551397ca 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -862,7 +862,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
862 struct batadv_orig_node_vlan *vlan; 862 struct batadv_orig_node_vlan *vlan;
863 u8 *tt_change_ptr; 863 u8 *tt_change_ptr;
864 864
865 rcu_read_lock(); 865 spin_lock_bh(&orig_node->vlan_list_lock);
866 hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) { 866 hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
867 num_vlan++; 867 num_vlan++;
868 num_entries += atomic_read(&vlan->tt.num_entries); 868 num_entries += atomic_read(&vlan->tt.num_entries);
@@ -900,7 +900,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
900 *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr; 900 *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
901 901
902out: 902out:
903 rcu_read_unlock(); 903 spin_unlock_bh(&orig_node->vlan_list_lock);
904 return tvlv_len; 904 return tvlv_len;
905} 905}
906 906
@@ -931,15 +931,20 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
931 struct batadv_tvlv_tt_vlan_data *tt_vlan; 931 struct batadv_tvlv_tt_vlan_data *tt_vlan;
932 struct batadv_softif_vlan *vlan; 932 struct batadv_softif_vlan *vlan;
933 u16 num_vlan = 0; 933 u16 num_vlan = 0;
934 u16 num_entries = 0; 934 u16 vlan_entries = 0;
935 u16 total_entries = 0;
935 u16 tvlv_len; 936 u16 tvlv_len;
936 u8 *tt_change_ptr; 937 u8 *tt_change_ptr;
937 int change_offset; 938 int change_offset;
938 939
939 rcu_read_lock(); 940 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
940 hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) { 941 hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
942 vlan_entries = atomic_read(&vlan->tt.num_entries);
943 if (vlan_entries < 1)
944 continue;
945
941 num_vlan++; 946 num_vlan++;
942 num_entries += atomic_read(&vlan->tt.num_entries); 947 total_entries += vlan_entries;
943 } 948 }
944 949
945 change_offset = sizeof(**tt_data); 950 change_offset = sizeof(**tt_data);
@@ -947,7 +952,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
947 952
948 /* if tt_len is negative, allocate the space needed by the full table */ 953 /* if tt_len is negative, allocate the space needed by the full table */
949 if (*tt_len < 0) 954 if (*tt_len < 0)
950 *tt_len = batadv_tt_len(num_entries); 955 *tt_len = batadv_tt_len(total_entries);
951 956
952 tvlv_len = *tt_len; 957 tvlv_len = *tt_len;
953 tvlv_len += change_offset; 958 tvlv_len += change_offset;
@@ -964,6 +969,10 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
964 969
965 tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1); 970 tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
966 hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) { 971 hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
972 vlan_entries = atomic_read(&vlan->tt.num_entries);
973 if (vlan_entries < 1)
974 continue;
975
967 tt_vlan->vid = htons(vlan->vid); 976 tt_vlan->vid = htons(vlan->vid);
968 tt_vlan->crc = htonl(vlan->tt.crc); 977 tt_vlan->crc = htonl(vlan->tt.crc);
969 978
@@ -974,7 +983,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
974 *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr; 983 *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
975 984
976out: 985out:
977 rcu_read_unlock(); 986 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
978 return tvlv_len; 987 return tvlv_len;
979} 988}
980 989
@@ -1538,6 +1547,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
1538 * handled by a given originator 1547 * handled by a given originator
1539 * @entry: the TT global entry to check 1548 * @entry: the TT global entry to check
1540 * @orig_node: the originator to search in the list 1549 * @orig_node: the originator to search in the list
1550 * @flags: a pointer to store TT flags for the given @entry received
1551 * from @orig_node
1541 * 1552 *
1542 * find out if an orig_node is already in the list of a tt_global_entry. 1553 * find out if an orig_node is already in the list of a tt_global_entry.
1543 * 1554 *
@@ -1545,7 +1556,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
1545 */ 1556 */
1546static bool 1557static bool
1547batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry, 1558batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
1548 const struct batadv_orig_node *orig_node) 1559 const struct batadv_orig_node *orig_node,
1560 u8 *flags)
1549{ 1561{
1550 struct batadv_tt_orig_list_entry *orig_entry; 1562 struct batadv_tt_orig_list_entry *orig_entry;
1551 bool found = false; 1563 bool found = false;
@@ -1553,6 +1565,10 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
1553 orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node); 1565 orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
1554 if (orig_entry) { 1566 if (orig_entry) {
1555 found = true; 1567 found = true;
1568
1569 if (flags)
1570 *flags = orig_entry->flags;
1571
1556 batadv_tt_orig_list_entry_put(orig_entry); 1572 batadv_tt_orig_list_entry_put(orig_entry);
1557 } 1573 }
1558 1574
@@ -1731,7 +1747,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
1731 if (!(common->flags & BATADV_TT_CLIENT_TEMP)) 1747 if (!(common->flags & BATADV_TT_CLIENT_TEMP))
1732 goto out; 1748 goto out;
1733 if (batadv_tt_global_entry_has_orig(tt_global_entry, 1749 if (batadv_tt_global_entry_has_orig(tt_global_entry,
1734 orig_node)) 1750 orig_node, NULL))
1735 goto out_remove; 1751 goto out_remove;
1736 batadv_tt_global_del_orig_list(tt_global_entry); 1752 batadv_tt_global_del_orig_list(tt_global_entry);
1737 goto add_orig_entry; 1753 goto add_orig_entry;
@@ -2880,23 +2896,46 @@ unlock:
2880} 2896}
2881 2897
2882/** 2898/**
2883 * batadv_tt_local_valid() - verify that given tt entry is a valid one 2899 * batadv_tt_local_valid() - verify local tt entry and get flags
2884 * @entry_ptr: to be checked local tt entry 2900 * @entry_ptr: to be checked local tt entry
2885 * @data_ptr: not used but definition required to satisfy the callback prototype 2901 * @data_ptr: not used but definition required to satisfy the callback prototype
2902	 * @flags: a pointer to store TT flags for this client into
2903 *
2904	 * Checks the validity of the given local TT entry. If it is valid, the
2905	 * provided flags pointer is updated.
2886 * 2906 *
2887	 * Return: true if the entry is valid, false otherwise.	2907	 * Return: true if the entry is valid, false otherwise.
2888 */ 2908 */
2889static bool batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr) 2909static bool batadv_tt_local_valid(const void *entry_ptr,
2910 const void *data_ptr,
2911 u8 *flags)
2890{ 2912{
2891 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr; 2913 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
2892 2914
2893 if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW) 2915 if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
2894 return false; 2916 return false;
2917
2918 if (flags)
2919 *flags = tt_common_entry->flags;
2920
2895 return true; 2921 return true;
2896} 2922}
2897 2923
2924/**
2925 * batadv_tt_global_valid() - verify global tt entry and get flags
2926 * @entry_ptr: to be checked global tt entry
2927 * @data_ptr: an orig_node object (may be NULL)
2928	 * @flags: a pointer to store TT flags for this client into
2929 *
2930	 * Checks the validity of the given global TT entry. If it is valid, the provided
2931	 * flags pointer is updated either with the common (summed) TT flags if data_ptr
2932	 * is NULL or the specific, per-originator TT flags otherwise.
2933 *
2934	 * Return: true if the entry is valid, false otherwise.
2935 */
2898static bool batadv_tt_global_valid(const void *entry_ptr, 2936static bool batadv_tt_global_valid(const void *entry_ptr,
2899 const void *data_ptr) 2937 const void *data_ptr,
2938 u8 *flags)
2900{ 2939{
2901 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr; 2940 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
2902 const struct batadv_tt_global_entry *tt_global_entry; 2941 const struct batadv_tt_global_entry *tt_global_entry;
@@ -2910,7 +2949,8 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
2910 struct batadv_tt_global_entry, 2949 struct batadv_tt_global_entry,
2911 common); 2950 common);
2912 2951
2913 return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node); 2952 return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node,
2953 flags);
2914} 2954}
2915 2955
2916/** 2956/**
@@ -2920,25 +2960,34 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
2920 * @hash: hash table containing the tt entries 2960 * @hash: hash table containing the tt entries
2921 * @tt_len: expected tvlv tt data buffer length in number of bytes 2961 * @tt_len: expected tvlv tt data buffer length in number of bytes
2922 * @tvlv_buff: pointer to the buffer to fill with the TT data 2962 * @tvlv_buff: pointer to the buffer to fill with the TT data
2923 * @valid_cb: function to filter tt change entries 2963 * @valid_cb: function to filter tt change entries and to return TT flags
2924 * @cb_data: data passed to the filter function as argument 2964 * @cb_data: data passed to the filter function as argument
2965 *
2966 * Fills the tvlv buff with the tt entries from the specified hash. If valid_cb
2967 * is not provided then this becomes a no-op.
2925 */ 2968 */
2926static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv, 2969static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
2927 struct batadv_hashtable *hash, 2970 struct batadv_hashtable *hash,
2928 void *tvlv_buff, u16 tt_len, 2971 void *tvlv_buff, u16 tt_len,
2929 bool (*valid_cb)(const void *, 2972 bool (*valid_cb)(const void *,
2930 const void *), 2973 const void *,
2974 u8 *flags),
2931 void *cb_data) 2975 void *cb_data)
2932{ 2976{
2933 struct batadv_tt_common_entry *tt_common_entry; 2977 struct batadv_tt_common_entry *tt_common_entry;
2934 struct batadv_tvlv_tt_change *tt_change; 2978 struct batadv_tvlv_tt_change *tt_change;
2935 struct hlist_head *head; 2979 struct hlist_head *head;
2936 u16 tt_tot, tt_num_entries = 0; 2980 u16 tt_tot, tt_num_entries = 0;
2981 u8 flags;
2982 bool ret;
2937 u32 i; 2983 u32 i;
2938 2984
2939 tt_tot = batadv_tt_entries(tt_len); 2985 tt_tot = batadv_tt_entries(tt_len);
2940 tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff; 2986 tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
2941 2987
2988 if (!valid_cb)
2989 return;
2990
2942 rcu_read_lock(); 2991 rcu_read_lock();
2943 for (i = 0; i < hash->size; i++) { 2992 for (i = 0; i < hash->size; i++) {
2944 head = &hash->table[i]; 2993 head = &hash->table[i];
@@ -2948,11 +2997,12 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
2948 if (tt_tot == tt_num_entries) 2997 if (tt_tot == tt_num_entries)
2949 break; 2998 break;
2950 2999
2951 if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data))) 3000 ret = valid_cb(tt_common_entry, cb_data, &flags);
3001 if (!ret)
2952 continue; 3002 continue;
2953 3003
2954 ether_addr_copy(tt_change->addr, tt_common_entry->addr); 3004 ether_addr_copy(tt_change->addr, tt_common_entry->addr);
2955 tt_change->flags = tt_common_entry->flags; 3005 tt_change->flags = flags;
2956 tt_change->vid = htons(tt_common_entry->vid); 3006 tt_change->vid = htons(tt_common_entry->vid);
2957 memset(tt_change->reserved, 0, 3007 memset(tt_change->reserved, 0,
2958 sizeof(tt_change->reserved)); 3008 sizeof(tt_change->reserved));
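
The translation-table hunks extend the valid_cb filter with a u8 *flags out-parameter so the generator advertises whatever the callback reports (per-originator flags when data_ptr is set) instead of re-reading the summed common flags. A stripped-down sketch of that callback shape; tt_entry_sketch, tt_valid_sketch and tt_generate_sketch are illustrative names, not the batman-adv ones:

#include <linux/types.h>

/* Illustrative entry, not the batman-adv struct. */
struct tt_entry_sketch {
	u8 common_flags;	/* summed flags across originators */
	u8 orig_flags;		/* flags for one specific originator */
};

static bool tt_valid_sketch(const void *entry_ptr, const void *data_ptr,
			    u8 *flags)
{
	const struct tt_entry_sketch *entry = entry_ptr;

	/* data_ptr selects per-originator flags, NULL means summed flags */
	if (flags)
		*flags = data_ptr ? entry->orig_flags : entry->common_flags;
	return true;
}

static void tt_generate_sketch(const struct tt_entry_sketch *entry,
			       const void *cb_data, u8 *advertised,
			       bool (*valid_cb)(const void *, const void *,
						u8 *))
{
	u8 flags;

	if (!valid_cb)
		return;			/* mirrors the new early return */
	if (!valid_cb(entry, cb_data, &flags))
		return;
	*advertised = flags;		/* use what the callback reported */
}
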
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 84cd4e3fd01b..0d56e36a6db7 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -283,9 +283,7 @@ int dccp_disconnect(struct sock *sk, int flags)
283 283
284 dccp_clear_xmit_timers(sk); 284 dccp_clear_xmit_timers(sk);
285 ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); 285 ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
286 ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
287 dp->dccps_hc_rx_ccid = NULL; 286 dp->dccps_hc_rx_ccid = NULL;
288 dp->dccps_hc_tx_ccid = NULL;
289 287
290 __skb_queue_purge(&sk->sk_receive_queue); 288 __skb_queue_purge(&sk->sk_receive_queue);
291 __skb_queue_purge(&sk->sk_write_queue); 289 __skb_queue_purge(&sk->sk_write_queue);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 4d622112bf95..e66172aaf241 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -649,6 +649,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
649 [RTA_ENCAP] = { .type = NLA_NESTED }, 649 [RTA_ENCAP] = { .type = NLA_NESTED },
650 [RTA_UID] = { .type = NLA_U32 }, 650 [RTA_UID] = { .type = NLA_U32 },
651 [RTA_MARK] = { .type = NLA_U32 }, 651 [RTA_MARK] = { .type = NLA_U32 },
652 [RTA_TABLE] = { .type = NLA_U32 },
652}; 653};
653 654
654static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, 655static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5ad2d8ed3a3f..57bbb060faaf 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -505,8 +505,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
505 int err; 505 int err;
506 int copied; 506 int copied;
507 507
508 WARN_ON_ONCE(sk->sk_family == AF_INET6);
509
510 err = -EAGAIN; 508 err = -EAGAIN;
511 skb = sock_dequeue_err_skb(sk); 509 skb = sock_dequeue_err_skb(sk);
512 if (!skb) 510 if (!skb)
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
index 4fe97723b53f..30221701614c 100644
--- a/net/ipv4/ipmr_base.c
+++ b/net/ipv4/ipmr_base.c
@@ -43,7 +43,10 @@ mr_table_alloc(struct net *net, u32 id,
43 write_pnet(&mrt->net, net); 43 write_pnet(&mrt->net, net);
44 44
45 mrt->ops = *ops; 45 mrt->ops = *ops;
46 rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params); 46 if (rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params)) {
47 kfree(mrt);
48 return NULL;
49 }
47 INIT_LIST_HEAD(&mrt->mfc_cache_list); 50 INIT_LIST_HEAD(&mrt->mfc_cache_list);
48 INIT_LIST_HEAD(&mrt->mfc_unres_queue); 51 INIT_LIST_HEAD(&mrt->mfc_unres_queue);
49 52
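
The ipmr_base hunk stops ignoring rhltable_init() failures and unwinds the half-built table instead. A kernel-style sketch of that check follows; sketch_entry, sketch_rht_params and mr_table_alloc_sketch() are placeholders, not the real ipmr types or parameters:

#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/stddef.h>

struct sketch_entry {
	u32 key;
	struct rhlist_head node;
};

static const struct rhashtable_params sketch_rht_params = {
	.key_len		= sizeof(u32),
	.key_offset		= offsetof(struct sketch_entry, key),
	.head_offset		= offsetof(struct sketch_entry, node),
	.automatic_shrinking	= true,
};

struct mr_table_sketch {
	struct rhltable mfc_hash;
};

/*
 * Sketch: rhltable_init() can fail (it allocates the bucket table), so
 * free the partially constructed object and return NULL instead of
 * handing back a table whose hash was never set up.
 */
static struct mr_table_sketch *mr_table_alloc_sketch(void)
{
	struct mr_table_sketch *mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return NULL;

	if (rhltable_init(&mrt->mfc_hash, &sketch_rht_params)) {
		kfree(mrt);
		return NULL;
	}

	return mrt;
}
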
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 0f6c9ca59062..5b5b0f95ffd1 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -401,7 +401,7 @@ u32 mesh_plink_deactivate(struct sta_info *sta)
401 401
402static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, 402static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
403 struct sta_info *sta, 403 struct sta_info *sta,
404 struct ieee802_11_elems *elems, bool insert) 404 struct ieee802_11_elems *elems)
405{ 405{
406 struct ieee80211_local *local = sdata->local; 406 struct ieee80211_local *local = sdata->local;
407 struct ieee80211_supported_band *sband; 407 struct ieee80211_supported_band *sband;
@@ -447,7 +447,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
447 sta->sta.bandwidth = IEEE80211_STA_RX_BW_20; 447 sta->sta.bandwidth = IEEE80211_STA_RX_BW_20;
448 } 448 }
449 449
450 if (insert) 450 if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
451 rate_control_rate_init(sta); 451 rate_control_rate_init(sta);
452 else 452 else
453 rate_control_rate_update(local, sband, sta, changed); 453 rate_control_rate_update(local, sband, sta, changed);
@@ -551,7 +551,7 @@ mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
551 rcu_read_lock(); 551 rcu_read_lock();
552 sta = sta_info_get(sdata, addr); 552 sta = sta_info_get(sdata, addr);
553 if (sta) { 553 if (sta) {
554 mesh_sta_info_init(sdata, sta, elems, false); 554 mesh_sta_info_init(sdata, sta, elems);
555 } else { 555 } else {
556 rcu_read_unlock(); 556 rcu_read_unlock();
557 /* can't run atomic */ 557 /* can't run atomic */
@@ -561,7 +561,7 @@ mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
561 return NULL; 561 return NULL;
562 } 562 }
563 563
564 mesh_sta_info_init(sdata, sta, elems, true); 564 mesh_sta_info_init(sdata, sta, elems);
565 565
566 if (sta_info_insert_rcu(sta)) 566 if (sta_info_insert_rcu(sta))
567 return NULL; 567 return NULL;
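
The mesh_plink change drops the caller-supplied "insert" flag and derives first-time init vs. update from state the station already carries (the WLAN_STA_RATE_CONTROL flag). A tiny sketch of that idea with illustrative names only, not the mac80211 API:

#include <linux/types.h>

struct sta_sketch {
	bool rate_control_initialised;	/* stands in for WLAN_STA_RATE_CONTROL */
};

static void rate_init_sketch(struct sta_sketch *sta)
{
	sta->rate_control_initialised = true;
}

static void rate_update_sketch(struct sta_sketch *sta)
{
	/* refresh parameters for an already-initialised station */
}

/*
 * Sketch: let the object's own state decide between "initialise once"
 * and "update", so every call site behaves identically and no caller
 * can pass the wrong boolean.
 */
static void sta_info_init_sketch(struct sta_sketch *sta)
{
	if (!sta->rate_control_initialised)
		rate_init_sketch(sta);
	else
		rate_update_sketch(sta);
}
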
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e9422fe45179..acb7b86574cd 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2911,7 +2911,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2911 if (unlikely(offset < 0)) 2911 if (unlikely(offset < 0))
2912 goto out_free; 2912 goto out_free;
2913 } else if (reserve) { 2913 } else if (reserve) {
2914 skb_push(skb, reserve); 2914 skb_reserve(skb, -reserve);
2915 } 2915 }
2916 2916
2917 /* Returns -EFAULT on error */ 2917 /* Returns -EFAULT on error */
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 963e4bf0aab8..a57e112d9b3e 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1588,7 +1588,7 @@ int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
1588 return ret; 1588 return ret;
1589 ok_count = ret; 1589 ok_count = ret;
1590 1590
1591 if (!exts) 1591 if (!exts || ok_count)
1592 return ok_count; 1592 return ok_count;
1593 ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop); 1593 ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
1594 if (ret < 0) 1594 if (ret < 0)
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 42247110d842..0cd2e764f47f 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -1006,7 +1006,7 @@ static const struct proto_ops inet6_seqpacket_ops = {
1006 .owner = THIS_MODULE, 1006 .owner = THIS_MODULE,
1007 .release = inet6_release, 1007 .release = inet6_release,
1008 .bind = inet6_bind, 1008 .bind = inet6_bind,
1009 .connect = inet_dgram_connect, 1009 .connect = sctp_inet_connect,
1010 .socketpair = sock_no_socketpair, 1010 .socketpair = sock_no_socketpair,
1011 .accept = inet_accept, 1011 .accept = inet_accept,
1012 .getname = sctp_getname, 1012 .getname = sctp_getname,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d685f8456762..6bf0a9971888 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1012,7 +1012,7 @@ static const struct proto_ops inet_seqpacket_ops = {
1012 .owner = THIS_MODULE, 1012 .owner = THIS_MODULE,
1013 .release = inet_release, /* Needs to be wrapped... */ 1013 .release = inet_release, /* Needs to be wrapped... */
1014 .bind = inet_bind, 1014 .bind = inet_bind,
1015 .connect = inet_dgram_connect, 1015 .connect = sctp_inet_connect,
1016 .socketpair = sock_no_socketpair, 1016 .socketpair = sock_no_socketpair,
1017 .accept = inet_accept, 1017 .accept = inet_accept,
1018 .getname = inet_getname, /* Semantics are different. */ 1018 .getname = inet_getname, /* Semantics are different. */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 80835ac26d2c..ae7e7c606f72 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1086,7 +1086,7 @@ out:
1086 */ 1086 */
1087static int __sctp_connect(struct sock *sk, 1087static int __sctp_connect(struct sock *sk,
1088 struct sockaddr *kaddrs, 1088 struct sockaddr *kaddrs,
1089 int addrs_size, 1089 int addrs_size, int flags,
1090 sctp_assoc_t *assoc_id) 1090 sctp_assoc_t *assoc_id)
1091{ 1091{
1092 struct net *net = sock_net(sk); 1092 struct net *net = sock_net(sk);
@@ -1104,7 +1104,6 @@ static int __sctp_connect(struct sock *sk,
1104 union sctp_addr *sa_addr = NULL; 1104 union sctp_addr *sa_addr = NULL;
1105 void *addr_buf; 1105 void *addr_buf;
1106 unsigned short port; 1106 unsigned short port;
1107 unsigned int f_flags = 0;
1108 1107
1109 sp = sctp_sk(sk); 1108 sp = sctp_sk(sk);
1110 ep = sp->ep; 1109 ep = sp->ep;
@@ -1254,13 +1253,7 @@ static int __sctp_connect(struct sock *sk,
1254 sp->pf->to_sk_daddr(sa_addr, sk); 1253 sp->pf->to_sk_daddr(sa_addr, sk);
1255 sk->sk_err = 0; 1254 sk->sk_err = 0;
1256 1255
1257 /* in-kernel sockets don't generally have a file allocated to them 1256 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1258 * if all they do is call sock_create_kern().
1259 */
1260 if (sk->sk_socket->file)
1261 f_flags = sk->sk_socket->file->f_flags;
1262
1263 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
1264 1257
1265 if (assoc_id) 1258 if (assoc_id)
1266 *assoc_id = asoc->assoc_id; 1259 *assoc_id = asoc->assoc_id;
@@ -1348,7 +1341,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
1348 sctp_assoc_t *assoc_id) 1341 sctp_assoc_t *assoc_id)
1349{ 1342{
1350 struct sockaddr *kaddrs; 1343 struct sockaddr *kaddrs;
1351 int err = 0; 1344 int err = 0, flags = 0;
1352 1345
1353 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", 1346 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
1354 __func__, sk, addrs, addrs_size); 1347 __func__, sk, addrs, addrs_size);
@@ -1367,7 +1360,13 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
1367 if (err) 1360 if (err)
1368 goto out_free; 1361 goto out_free;
1369 1362
1370 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id); 1363 /* in-kernel sockets don't generally have a file allocated to them
1364 * if all they do is call sock_create_kern().
1365 */
1366 if (sk->sk_socket->file)
1367 flags = sk->sk_socket->file->f_flags;
1368
1369 err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
1371 1370
1372out_free: 1371out_free:
1373 kvfree(kaddrs); 1372 kvfree(kaddrs);
@@ -4397,16 +4396,26 @@ out_nounlock:
4397 * len: the size of the address. 4396 * len: the size of the address.
4398 */ 4397 */
4399static int sctp_connect(struct sock *sk, struct sockaddr *addr, 4398static int sctp_connect(struct sock *sk, struct sockaddr *addr,
4400 int addr_len) 4399 int addr_len, int flags)
4401{ 4400{
4402 int err = 0; 4401 struct inet_sock *inet = inet_sk(sk);
4403 struct sctp_af *af; 4402 struct sctp_af *af;
4403 int err = 0;
4404 4404
4405 lock_sock(sk); 4405 lock_sock(sk);
4406 4406
4407 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, 4407 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
4408 addr, addr_len); 4408 addr, addr_len);
4409 4409
4410 /* We may need to bind the socket. */
4411 if (!inet->inet_num) {
4412 if (sk->sk_prot->get_port(sk, 0)) {
4413 release_sock(sk);
4414 return -EAGAIN;
4415 }
4416 inet->inet_sport = htons(inet->inet_num);
4417 }
4418
4410 /* Validate addr_len before calling common connect/connectx routine. */ 4419 /* Validate addr_len before calling common connect/connectx routine. */
4411 af = sctp_get_af_specific(addr->sa_family); 4420 af = sctp_get_af_specific(addr->sa_family);
4412 if (!af || addr_len < af->sockaddr_len) { 4421 if (!af || addr_len < af->sockaddr_len) {
@@ -4415,13 +4424,25 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
4415 /* Pass correct addr len to common routine (so it knows there 4424 /* Pass correct addr len to common routine (so it knows there
4416	 * is only one address being passed).	4425	 * is only one address being passed).
4417 */ 4426 */
4418 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); 4427 err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
4419 } 4428 }
4420 4429
4421 release_sock(sk); 4430 release_sock(sk);
4422 return err; 4431 return err;
4423} 4432}
4424 4433
4434int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
4435 int addr_len, int flags)
4436{
4437 if (addr_len < sizeof(uaddr->sa_family))
4438 return -EINVAL;
4439
4440 if (uaddr->sa_family == AF_UNSPEC)
4441 return -EOPNOTSUPP;
4442
4443 return sctp_connect(sock->sk, uaddr, addr_len, flags);
4444}
4445
4425/* FIXME: Write comments. */ 4446/* FIXME: Write comments. */
4426static int sctp_disconnect(struct sock *sk, int flags) 4447static int sctp_disconnect(struct sock *sk, int flags)
4427{ 4448{
@@ -8724,7 +8745,6 @@ struct proto sctp_prot = {
8724 .name = "SCTP", 8745 .name = "SCTP",
8725 .owner = THIS_MODULE, 8746 .owner = THIS_MODULE,
8726 .close = sctp_close, 8747 .close = sctp_close,
8727 .connect = sctp_connect,
8728 .disconnect = sctp_disconnect, 8748 .disconnect = sctp_disconnect,
8729 .accept = sctp_accept, 8749 .accept = sctp_accept,
8730 .ioctl = sctp_ioctl, 8750 .ioctl = sctp_ioctl,
@@ -8767,7 +8787,6 @@ struct proto sctpv6_prot = {
8767 .name = "SCTPv6", 8787 .name = "SCTPv6",
8768 .owner = THIS_MODULE, 8788 .owner = THIS_MODULE,
8769 .close = sctp_close, 8789 .close = sctp_close,
8770 .connect = sctp_connect,
8771 .disconnect = sctp_disconnect, 8790 .disconnect = sctp_disconnect,
8772 .accept = sctp_accept, 8791 .accept = sctp_accept,
8773 .ioctl = sctp_ioctl, 8792 .ioctl = sctp_ioctl,
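
The SCTP hunks move connect() behind a proto_ops-level wrapper that validates the address before delegating and hands the caller's O_NONBLOCK flags down, so __sctp_connect() no longer inspects sk->sk_socket->file. A thin sketch of that wrapper shape; proto_connect_sketch() is a stub standing in for the real sctp_connect():

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <net/sock.h>

/* Stub for the protocol-level connect that would take the socket lock. */
static int proto_connect_sketch(struct sock *sk, struct sockaddr *addr,
				int addr_len, int flags)
{
	return 0;
}

/*
 * Sketch: validate what can be checked without the protocol lock
 * (minimum length, supported family) and forward the caller's file
 * flags, mirroring the new sctp_inet_connect() above.
 */
static int inet_connect_sketch(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	if (addr_len < sizeof(uaddr->sa_family))
		return -EINVAL;

	if (uaddr->sa_family == AF_UNSPEC)
		return -EOPNOTSUPP;

	return proto_connect_sketch(sock->sk, uaddr, addr_len, flags);
}
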
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a052693c2e85..7c5135a92d76 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -15555,7 +15555,8 @@ void cfg80211_ft_event(struct net_device *netdev,
15555 if (!ft_event->target_ap) 15555 if (!ft_event->target_ap)
15556 return; 15556 return;
15557 15557
15558 msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL); 15558 msg = nlmsg_new(100 + ft_event->ies_len + ft_event->ric_ies_len,
15559 GFP_KERNEL);
15559 if (!msg) 15560 if (!msg)
15560 return; 15561 return;
15561 15562
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index ac3e12c32aa3..5fcec5c94eb7 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -916,6 +916,9 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
916 const struct fwdb_header *hdr = regdb; 916 const struct fwdb_header *hdr = regdb;
917 const struct fwdb_country *country; 917 const struct fwdb_country *country;
918 918
919 if (!regdb)
920 return -ENODATA;
921
919 if (IS_ERR(regdb)) 922 if (IS_ERR(regdb))
920 return PTR_ERR(regdb); 923 return PTR_ERR(regdb);
921 924
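
The reg.c fix adds an explicit NULL test ahead of IS_ERR(), because the lazily loaded pointer has three states and IS_ERR(NULL) is false. A minimal sketch of that ordering, with db and db_lookup_sketch() as stand-ins for regdb and the real query path:

#include <linux/err.h>
#include <linux/errno.h>

/* NULL = not loaded yet, ERR_PTR(-E...) = load failed, anything else = valid. */
static const void *db;

static int db_lookup_sketch(void)
{
	if (!db)
		return -ENODATA;	/* must come first: IS_ERR(NULL) is false */

	if (IS_ERR(db))
		return PTR_ERR(db);	/* the earlier load reported an error */

	return 0;			/* safe to dereference db from here on */
}
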
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index e16d6713f236..2d42eb9cd1a5 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5041,7 +5041,7 @@ sub process {
5041 $tmp_stmt =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g; 5041 $tmp_stmt =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g;
5042 $tmp_stmt =~ s/\#+\s*$arg\b//g; 5042 $tmp_stmt =~ s/\#+\s*$arg\b//g;
5043 $tmp_stmt =~ s/\b$arg\s*\#\#//g; 5043 $tmp_stmt =~ s/\b$arg\s*\#\#//g;
5044 my $use_cnt = $tmp_stmt =~ s/\b$arg\b//g; 5044 my $use_cnt = () = $tmp_stmt =~ /\b$arg\b/g;
5045 if ($use_cnt > 1) { 5045 if ($use_cnt > 1) {
5046 CHK("MACRO_ARG_REUSE", 5046 CHK("MACRO_ARG_REUSE",
5047 "Macro argument reuse '$arg' - possible side-effects?\n" . "$herectx"); 5047 "Macro argument reuse '$arg' - possible side-effects?\n" . "$herectx");
diff --git a/sound/core/timer.c b/sound/core/timer.c
index dc87728c5b74..0ddcae495838 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -592,7 +592,7 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
592 else 592 else
593 timeri->flags |= SNDRV_TIMER_IFLG_PAUSED; 593 timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
594 snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : 594 snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
595 SNDRV_TIMER_EVENT_CONTINUE); 595 SNDRV_TIMER_EVENT_PAUSE);
596 unlock: 596 unlock:
597 spin_unlock_irqrestore(&timer->lock, flags); 597 spin_unlock_irqrestore(&timer->lock, flags);
598 return result; 598 return result;
@@ -614,7 +614,7 @@ static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
614 list_del_init(&timeri->ack_list); 614 list_del_init(&timeri->ack_list);
615 list_del_init(&timeri->active_list); 615 list_del_init(&timeri->active_list);
616 snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : 616 snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
617 SNDRV_TIMER_EVENT_CONTINUE); 617 SNDRV_TIMER_EVENT_PAUSE);
618 spin_unlock(&timeri->timer->lock); 618 spin_unlock(&timeri->timer->lock);
619 } 619 }
620 spin_unlock_irqrestore(&slave_active_lock, flags); 620 spin_unlock_irqrestore(&slave_active_lock, flags);
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 321e78baa63c..9bd935216c18 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -622,8 +622,10 @@ snd_hda_check_power_state(struct hda_codec *codec, hda_nid_t nid,
622{ 622{
623 return snd_hdac_check_power_state(&codec->core, nid, target_state); 623 return snd_hdac_check_power_state(&codec->core, nid, target_state);
624} 624}
625static inline bool snd_hda_sync_power_state(struct hda_codec *codec, 625
626 hda_nid_t nid, unsigned int target_state) 626static inline unsigned int snd_hda_sync_power_state(struct hda_codec *codec,
627 hda_nid_t nid,
628 unsigned int target_state)
627{ 629{
628 return snd_hdac_sync_power_state(&codec->core, nid, target_state); 630 return snd_hdac_sync_power_state(&codec->core, nid, target_state);
629} 631}
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 6c645eb77d42..ee820fcc29b0 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -252,6 +252,13 @@ void idr_checks(void)
252 idr_remove(&idr, 3); 252 idr_remove(&idr, 3);
253 idr_remove(&idr, 0); 253 idr_remove(&idr, 0);
254 254
255 assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
256 idr_remove(&idr, 1);
257 for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
258 assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
259 idr_remove(&idr, 1 << 30);
260 idr_destroy(&idr);
261
255 for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) { 262 for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
256 struct item *item = item_create(i, 0); 263 struct item *item = item_create(i, 0);
257 assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i); 264 assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 983dd25d49f4..1eefe211a4a8 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -5,3 +5,5 @@ CONFIG_BPF_EVENTS=y
5CONFIG_TEST_BPF=m 5CONFIG_TEST_BPF=m
6CONFIG_CGROUP_BPF=y 6CONFIG_CGROUP_BPF=y
7CONFIG_NETDEVSIM=m 7CONFIG_NETDEVSIM=m
8CONFIG_NET_CLS_ACT=y
9CONFIG_NET_SCH_INGRESS=y
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 6a75a3ea44ad..7ba089b33e8b 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -7,3 +7,8 @@ CONFIG_NET_L3_MASTER_DEV=y
7CONFIG_IPV6=y 7CONFIG_IPV6=y
8CONFIG_IPV6_MULTIPLE_TABLES=y 8CONFIG_IPV6_MULTIPLE_TABLES=y
9CONFIG_VETH=y 9CONFIG_VETH=y
10CONFIG_INET_XFRM_MODE_TUNNEL=y
11CONFIG_NET_IPVTI=y
12CONFIG_INET6_XFRM_MODE_TUNNEL=y
13CONFIG_IPV6_VTI=y
14CONFIG_DUMMY=y
diff --git a/tools/testing/selftests/net/reuseport_bpf_numa.c b/tools/testing/selftests/net/reuseport_bpf_numa.c
index 365c32e84189..c9f478b40996 100644
--- a/tools/testing/selftests/net/reuseport_bpf_numa.c
+++ b/tools/testing/selftests/net/reuseport_bpf_numa.c
@@ -23,6 +23,8 @@
23#include <unistd.h> 23#include <unistd.h>
24#include <numa.h> 24#include <numa.h>
25 25
26#include "../kselftest.h"
27
26static const int PORT = 8888; 28static const int PORT = 8888;
27 29
28static void build_rcv_group(int *rcv_fd, size_t len, int family, int proto) 30static void build_rcv_group(int *rcv_fd, size_t len, int family, int proto)
@@ -229,7 +231,7 @@ int main(void)
229 int *rcv_fd, nodes; 231 int *rcv_fd, nodes;
230 232
231 if (numa_available() < 0) 233 if (numa_available() < 0)
232 error(1, errno, "no numa api support"); 234 ksft_exit_skip("no numa api support\n");
233 235
234 nodes = numa_max_node() + 1; 236 nodes = numa_max_node() + 1;
235 237
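
The reuseport_bpf_numa change switches the missing-prerequisite case from error(1, ...) to ksft_exit_skip(), so the harness records a skip rather than a failure. A minimal selftest-style sketch of that convention, assuming it is built in-tree so the relative kselftest.h include resolves:

#include <numa.h>

#include "../kselftest.h"

int main(void)
{
	/*
	 * Missing NUMA support is an environment problem, not a test bug:
	 * report SKIP to the kselftest harness instead of exiting with 1.
	 */
	if (numa_available() < 0)
		ksft_exit_skip("no numa api support\n");

	/* ... the actual reuseport/NUMA test body would run here ... */
	return 0;
}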