author     David S. Miller <davem@davemloft.net>    2018-04-01 19:49:34 -0400
committer  David S. Miller <davem@davemloft.net>    2018-04-01 19:49:34 -0400
commit     c0b458a9463bd6be165374a8e9e3235800ee132e (patch)
tree       a96c6393749ab231c6dda8c62683493bd1c66070
parent     859a59352e926315b6384c5fd895b00a30659a12 (diff)
parent     b5dbc28762fd3fd40ba76303be0c7f707826f982 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Minor conflicts in drivers/net/ethernet/mellanox/mlx5/core/en_rep.c,
we had some overlapping changes:

1) In 'net' MLX5E_PARAMS_LOG_{SQ,RQ}_SIZE -->
   MLX5E_REP_PARAMS_LOG_{SQ,RQ}_SIZE

2) In 'net-next' params->log_rq_size is renamed to be
   params->log_rq_mtu_frames.

3) In 'net-next' params->hard_mtu is added.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  .mailmap | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt | 4
-rw-r--r--  Documentation/isdn/INTERFACE.CAPI | 2
-rw-r--r--  Documentation/isdn/README | 4
-rw-r--r--  Documentation/isdn/README.FAQ | 4
-rw-r--r--  Documentation/isdn/README.gigaset | 16
-rw-r--r--  MAINTAINERS | 39
-rw-r--r--  Makefile | 6
-rw-r--r--  arch/arm/Kconfig.debug | 1
-rwxr-xr-x  arch/arm/boot/deflate_xip_data.sh | 6
-rw-r--r--  arch/arm/boot/dts/aspeed-g4.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/aspeed-g5.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/imx7d-sdb.dts | 2
-rw-r--r--  arch/arm/boot/dts/rk3288.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts | 63
-rw-r--r--  arch/arm/include/asm/vdso.h | 2
-rw-r--r--  arch/arm/kernel/vdso.c | 12
-rw-r--r--  arch/arm/mach-davinci/board-omapl138-hawk.c | 4
-rw-r--r--  arch/arm/mach-ux500/cpu-db8500.c | 3
-rw-r--r--  arch/arm/plat-omap/dmtimer.c | 7
-rw-r--r--  arch/arm/plat-omap/include/plat/sram.h | 11
-rw-r--r--  arch/arm/plat-omap/sram.c | 36
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi | 16
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399.dtsi | 8
-rw-r--r--  arch/mips/lantiq/Kconfig | 2
-rw-r--r--  arch/mips/lantiq/xway/sysctrl.c | 6
-rw-r--r--  arch/mips/ralink/mt7621.c | 42
-rw-r--r--  arch/mips/ralink/reset.c | 7
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu.h | 3
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush-radix.h | 3
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 3
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 18
-rw-r--r--  arch/powerpc/kernel/dt_cpu_ftrs.c | 6
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 2
-rw-r--r--  arch/powerpc/kernel/irq.c | 8
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c | 3
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 11
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 35
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 16
-rw-r--r--  arch/powerpc/mm/mmu_context_book3s64.c | 1
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 1
-rw-r--r--  arch/powerpc/mm/tlb-radix.c | 169
-rw-r--r--  arch/x86/Kconfig.cpu | 13
-rw-r--r--  arch/x86/Makefile | 9
-rw-r--r--  arch/x86/boot/compressed/misc.c | 4
-rw-r--r--  arch/x86/entry/entry_64.S | 2
-rw-r--r--  arch/x86/entry/vdso/vdso32/vclock_gettime.c | 2
-rw-r--r--  arch/x86/entry/vsyscall/vsyscall_64.c | 2
-rw-r--r--  arch/x86/events/core.c | 3
-rw-r--r--  arch/x86/events/intel/core.c | 14
-rw-r--r--  arch/x86/events/intel/ds.c | 6
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c | 32
-rw-r--r--  arch/x86/events/perf_event.h | 6
-rw-r--r--  arch/x86/include/asm/barrier.h | 30
-rw-r--r--  arch/x86/include/asm/io.h | 15
-rw-r--r--  arch/x86/kernel/idt.c | 2
-rw-r--r--  arch/x86/kernel/kvm.c | 4
-rw-r--r--  arch/x86/kernel/pci-nommu.c | 19
-rw-r--r--  arch/x86/kernel/traps.c | 15
-rw-r--r--  arch/x86/kvm/vmx.c | 10
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 2
-rw-r--r--  arch/x86/um/asm/barrier.h | 4
-rw-r--r--  drivers/atm/iphase.c | 2
-rw-r--r--  drivers/dma/stm32-dmamux.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 3
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 6
-rw-r--r--  drivers/hv/ring_buffer.c | 52
-rw-r--r--  drivers/i2c/busses/i2c-stm32f7.c | 5
-rw-r--r--  drivers/iio/accel/st_accel_core.c | 7
-rw-r--r--  drivers/iio/adc/meson_saradc.c | 4
-rw-r--r--  drivers/iio/adc/stm32-dfsdm-adc.c | 39
-rw-r--r--  drivers/iio/adc/stm32-dfsdm-core.c | 12
-rw-r--r--  drivers/iio/chemical/ccs811.c | 3
-rw-r--r--  drivers/iio/pressure/st_pressure_core.c | 2
-rw-r--r--  drivers/infiniband/core/addr.c | 25
-rw-r--r--  drivers/infiniband/core/device.c | 3
-rw-r--r--  drivers/infiniband/core/ucma.c | 47
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 12
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 3
-rw-r--r--  drivers/infiniband/hw/qedr/main.c | 3
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 45
-rw-r--r--  drivers/md/dm-mpath.c | 2
-rw-r--r--  drivers/md/dm.c | 8
-rw-r--r--  drivers/media/Kconfig | 2
-rw-r--r--  drivers/media/platform/tegra-cec/tegra_cec.c | 17
-rw-r--r--  drivers/mtd/chips/jedec_probe.c | 2
-rw-r--r--  drivers/mtd/mtdchar.c | 4
-rw-r--r--  drivers/mtd/nand/atmel/pmecc.c | 2
-rw-r--r--  drivers/mtd/nand/fsl_ifc_nand.c | 32
-rw-r--r--  drivers/net/bonding/bond_main.c | 73
-rw-r--r--  drivers/net/dsa/mt7530.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 78
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c | 20
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 4
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 2
-rw-r--r--  drivers/net/team/team.c | 12
-rw-r--r--  drivers/net/usb/lan78xx.c | 33
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 5
-rw-r--r--  drivers/net/vrf.c | 5
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos-arm.c | 56
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos-arm64.c | 14
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-s3c24xx.c | 28
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-s3c64xx.c | 7
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-samsung.c | 61
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-samsung.h | 40
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7795.c | 1
-rw-r--r--  drivers/scsi/hosts.c | 1
-rw-r--r--  drivers/scsi/hpsa.c | 73
-rw-r--r--  drivers/scsi/hpsa.h | 1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 6
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 8
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 39
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 12
-rw-r--r--  drivers/scsi/sd.c | 2
-rw-r--r--  drivers/scsi/virtio_scsi.c | 129
-rw-r--r--  drivers/staging/ncpfs/ncplib_kernel.c | 4
-rw-r--r--  drivers/tty/vt/vt.c | 8
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 3
-rw-r--r--  drivers/vhost/net.c | 4
-rw-r--r--  drivers/vhost/vhost.c | 17
-rw-r--r--  fs/ceph/file.c | 9
-rw-r--r--  include/linux/fsl_ifc.h | 6
-rw-r--r--  include/linux/if_vlan.h | 15
-rw-r--r--  include/linux/jump_label.h | 4
-rw-r--r--  include/linux/net_dim.h | 2
-rw-r--r--  include/net/llc_conn.h | 2
-rw-r--r--  include/net/netfilter/nf_tables.h | 4
-rw-r--r--  include/net/sch_generic.h | 1
-rw-r--r--  include/rdma/ib_addr.h | 2
-rw-r--r--  include/scsi/scsi_host.h | 3
-rw-r--r--  include/uapi/linux/lirc.h | 1
-rw-r--r--  include/uapi/linux/usb/audio.h | 4
-rw-r--r--  init/main.c | 2
-rw-r--r--  ipc/mqueue.c | 74
-rw-r--r--  ipc/shm.c | 12
-rw-r--r--  kernel/events/core.c | 21
-rw-r--r--  kernel/jump_label.c | 7
-rw-r--r--  kernel/locking/mutex.c | 37
-rw-r--r--  kernel/sched/debug.c | 29
-rw-r--r--  kernel/time/posix-timers.c | 11
-rw-r--r--  kernel/trace/trace_kprobe.c | 4
-rw-r--r--  kernel/trace/trace_probe.c | 8
-rw-r--r--  kernel/trace/trace_probe.h | 2
-rw-r--r--  mm/kmemleak.c | 12
-rw-r--r--  mm/memcontrol.c | 6
-rw-r--r--  mm/page_owner.c | 6
-rw-r--r--  mm/slab.c | 1
-rw-r--r--  mm/vmstat.c | 2
-rw-r--r--  net/batman-adv/gateway_client.c | 5
-rw-r--r--  net/batman-adv/multicast.c | 4
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/core/skbuff.c | 6
-rw-r--r--  net/ipv4/ip_tunnel.c | 31
-rw-r--r--  net/ipv4/ip_vti.c | 2
-rw-r--r--  net/ipv4/netfilter/Makefile | 2
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 14
-rw-r--r--  net/ipv4/netfilter/nf_socket_ipv4.c | 6
-rw-r--r--  net/ipv4/syncookies.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 3
-rw-r--r--  net/ipv6/ip6_output.c | 13
-rw-r--r--  net/ipv6/ip6_vti.c | 36
-rw-r--r--  net/ipv6/netfilter/nf_socket_ipv6.c | 6
-rw-r--r--  net/ipv6/route.c | 16
-rw-r--r--  net/ipv6/seg6_iptunnel.c | 16
-rw-r--r--  net/ipv6/syncookies.c | 2
-rw-r--r--  net/llc/llc_c_ac.c | 15
-rw-r--r--  net/llc/llc_conn.c | 32
-rw-r--r--  net/netfilter/nf_tables_api.c | 106
-rw-r--r--  net/netfilter/nft_set_hash.c | 2
-rw-r--r--  net/netlink/af_netlink.c | 3
-rw-r--r--  net/sched/act_api.c | 4
-rw-r--r--  net/sched/sch_generic.c | 17
-rw-r--r--  net/smc/smc_clc.c | 2
-rw-r--r--  net/strparser/strparser.c | 4
-rw-r--r--  net/xfrm/xfrm_input.c | 6
-rw-r--r--  net/xfrm/xfrm_output.c | 5
-rwxr-xr-x  scripts/adjust_autoksyms.sh | 7
-rwxr-xr-x  scripts/package/builddeb | 2
-rwxr-xr-x  scripts/package/mkspec | 2
-rw-r--r--  sound/core/oss/pcm_oss.c | 4
-rw-r--r--  sound/core/pcm_native.c | 2
-rw-r--r--  sound/drivers/aloop.c | 17
-rw-r--r--  sound/pci/hda/hda_intel.c | 5
-rw-r--r--  sound/pci/hda/patch_realtek.c | 20
-rw-r--r--  sound/usb/quirks.c | 1
-rw-r--r--  tools/bpf/bpftool/map.c | 2
-rw-r--r--  tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc | 46
-rw-r--r--  tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc | 97
-rw-r--r--  tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc | 43
-rw-r--r--  tools/testing/selftests/x86/ptrace_syscall.c | 8
205 files changed, 1916 insertions, 1103 deletions
diff --git a/.mailmap b/.mailmap
index e18cab73e209..a2ce89a456c2 100644
--- a/.mailmap
+++ b/.mailmap
@@ -62,6 +62,7 @@ Frank Zago <fzago@systemfabricworks.com>
 Greg Kroah-Hartman <greg@echidna.(none)>
 Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
+Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
 Henk Vergonet <Henk.Vergonet@gmail.com>
 Henrik Kretzschmar <henne@nachtwindheim.de>
 Henrik Rydberg <rydberg@bitmath.org>
diff --git a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt
index e9ebb8a20e0d..ba24ca7ba95e 100644
--- a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt
+++ b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt
@@ -3,11 +3,11 @@ Device-Tree bindings for sigma delta modulator
 Required properties:
 - compatible: should be "ads1201", "sd-modulator". "sd-modulator" can be use
   as a generic SD modulator if modulator not specified in compatible list.
-- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers".
+- #io-channel-cells = <0>: See the IIO bindings section "IIO consumers".
 
 Example node:
 
 	ads1202: adc@0 {
 		compatible = "sd-modulator";
-		#io-channel-cells = <1>;
+		#io-channel-cells = <0>;
 	};
diff --git a/Documentation/isdn/INTERFACE.CAPI b/Documentation/isdn/INTERFACE.CAPI
index 1688b5a1fd77..021aa9cf139d 100644
--- a/Documentation/isdn/INTERFACE.CAPI
+++ b/Documentation/isdn/INTERFACE.CAPI
@@ -18,7 +18,7 @@ corresponding hardware driver. Kernel CAPI then forwards CAPI messages in both
 directions between the application and the hardware driver.
 
 Format and semantics of CAPI messages are specified in the CAPI 2.0 standard.
-This standard is freely available from http://www.capi.org.
+This standard is freely available from https://www.capi.org.
 
 
 2. Driver and Device Registration
diff --git a/Documentation/isdn/README b/Documentation/isdn/README
index 32d4e80c2c03..74bd2bdb455b 100644
--- a/Documentation/isdn/README
+++ b/Documentation/isdn/README
@@ -33,10 +33,10 @@ README for the ISDN-subsystem
   de.alt.comm.isdn4linux
 
   There is also a well maintained FAQ in English available at
-  http://www.mhessler.de/i4lfaq/
+  https://www.mhessler.de/i4lfaq/
   It can be viewed online, or downloaded in sgml/text/html format.
   The FAQ can also be viewed online at
-  http://www.isdn4linux.de/faq/
+  https://www.isdn4linux.de/faq/i4lfaq.html
   or downloaded from
   ftp://ftp.isdn4linux.de/pub/isdn4linux/FAQ/
 
diff --git a/Documentation/isdn/README.FAQ b/Documentation/isdn/README.FAQ
index 356f7944641d..e5dd1addacdd 100644
--- a/Documentation/isdn/README.FAQ
+++ b/Documentation/isdn/README.FAQ
@@ -8,9 +8,9 @@ You find it in:
 
 In case you just want to see the FAQ online, or download the newest version,
 you can have a look at my website:
-http://www.mhessler.de/i4lfaq/ (view + download)
+https://www.mhessler.de/i4lfaq/ (view + download)
 or:
-http://www.isdn4linux.de/faq/ (view)
+https://www.isdn4linux.de/faq/4lfaq.html (view)
 
 As the extension tells, the FAQ is in SGML format, and you can convert it
 into text/html/... format by using the sgml2txt/sgml2html/... tools.
diff --git a/Documentation/isdn/README.gigaset b/Documentation/isdn/README.gigaset
index 7534c6039adc..9b1ce277ca3d 100644
--- a/Documentation/isdn/README.gigaset
+++ b/Documentation/isdn/README.gigaset
@@ -29,8 +29,9 @@ GigaSet 307x Device Driver
 	T-Com Sinus 721 data
 	Chicago 390 USB (KPN)
 
-	See also http://www.erbze.info/sinus_gigaset.htm and
-	http://gigaset307x.sourceforge.net/
+	See also http://www.erbze.info/sinus_gigaset.htm
+	  (archived at https://web.archive.org/web/20100717020421/http://www.erbze.info:80/sinus_gigaset.htm ) and
+	http://gigaset307x.sourceforge.net/
 
 	We had also reports from users of Gigaset M105 who could use the drivers
 	with SX 100 and CX 100 ISDN bases (only in unimodem mode, see section 2.5.)
@@ -52,7 +53,7 @@ GigaSet 307x Device Driver
 	to use CAPI 2.0 or ISDN4Linux for ISDN connections (voice or data).
 
 	There are some user space tools available at
-	http://sourceforge.net/projects/gigaset307x/
+	https://sourceforge.net/projects/gigaset307x/
 	which provide access to additional device specific functions like SMS,
 	phonebook or call journal.
 
@@ -202,7 +203,7 @@ GigaSet 307x Device Driver
 	You can use some configuration tool of your distribution to configure this
 	"modem" or configure pppd/wvdial manually. There are some example ppp
 	configuration files and chat scripts in the gigaset-VERSION/ppp directory
-	in the driver packages from http://sourceforge.net/projects/gigaset307x/.
+	in the driver packages from https://sourceforge.net/projects/gigaset307x/.
 	Please note that the USB drivers are not able to change the state of the
 	control lines. This means you must use "Stupid Mode" if you are using
 	wvdial or you should use the nocrtscts option of pppd.
@@ -361,7 +362,7 @@ GigaSet 307x Device Driver
 	---------------------------
 	If you can't solve problems with the driver on your own, feel free to
 	use one of the forums, bug trackers, or mailing lists on
-	http://sourceforge.net/projects/gigaset307x
+	https://sourceforge.net/projects/gigaset307x
 	or write an electronic mail to the maintainers.
 
 	Try to provide as much information as possible, such as
@@ -391,11 +392,12 @@ GigaSet 307x Device Driver
 4. Links, other software
 	---------------------
 	- Sourceforge project developing this driver and associated tools
-	  http://sourceforge.net/projects/gigaset307x
+	  https://sourceforge.net/projects/gigaset307x
 	- Yahoo! Group on the Siemens Gigaset family of devices
-	  http://de.groups.yahoo.com/group/Siemens-Gigaset
+	  https://de.groups.yahoo.com/group/Siemens-Gigaset
 	- Siemens Gigaset/T-Sinus compatibility table
 	  http://www.erbze.info/sinus_gigaset.htm
+	  (archived at https://web.archive.org/web/20100717020421/http://www.erbze.info:80/sinus_gigaset.htm )
 
 
 5. Credits
diff --git a/MAINTAINERS b/MAINTAINERS
index 22ef8d64fa59..cbffcd4b1320 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1060,41 +1060,42 @@ ARM PORT
 M:	Russell King <linux@armlinux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.armlinux.org.uk/
-S:	Maintained
+S:	Odd Fixes
 T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git
 F:	arch/arm/
+X:	arch/arm/boot/dts/
 
 ARM PRIMECELL AACI PL041 DRIVER
 M:	Russell King <linux@armlinux.org.uk>
-S:	Maintained
+S:	Odd Fixes
 F:	sound/arm/aaci.*
 
 ARM PRIMECELL BUS SUPPORT
 M:	Russell King <linux@armlinux.org.uk>
-S:	Maintained
+S:	Odd Fixes
 F:	drivers/amba/
 F:	include/linux/amba/bus.h
 
 ARM PRIMECELL CLCD PL110 DRIVER
 M:	Russell King <linux@armlinux.org.uk>
-S:	Maintained
+S:	Odd Fixes
 F:	drivers/video/fbdev/amba-clcd.*
 
 ARM PRIMECELL KMI PL050 DRIVER
 M:	Russell King <linux@armlinux.org.uk>
-S:	Maintained
+S:	Odd Fixes
 F:	drivers/input/serio/ambakmi.*
 F:	include/linux/amba/kmi.h
 
 ARM PRIMECELL MMCI PL180/1 DRIVER
 M:	Russell King <linux@armlinux.org.uk>
-S:	Maintained
+S:	Odd Fixes
 F:	drivers/mmc/host/mmci.*
 F:	include/linux/amba/mmci.h
 
 ARM PRIMECELL UART PL010 AND PL011 DRIVERS
 M:	Russell King <linux@armlinux.org.uk>
-S:	Maintained
+S:	Odd Fixes
 F:	drivers/tty/serial/amba-pl01*.c
 F:	include/linux/amba/serial.h
 
@@ -1152,7 +1153,7 @@ S: Maintained
 F:	drivers/clk/sunxi/
 
 ARM/Allwinner sunXi SoC support
-M:	Maxime Ripard <maxime.ripard@free-electrons.com>
+M:	Maxime Ripard <maxime.ripard@bootlin.com>
 M:	Chen-Yu Tsai <wens@csie.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -4626,7 +4627,7 @@ F: include/uapi/drm/drm*
 F:	include/linux/vga*
 
 DRM DRIVERS FOR ALLWINNER A10
-M:	Maxime Ripard <maxime.ripard@free-electrons.com>
+M:	Maxime Ripard <maxime.ripard@bootlin.com>
 L:	dri-devel@lists.freedesktop.org
 S:	Supported
 F:	drivers/gpu/drm/sun4i/
@@ -8435,7 +8436,7 @@ S: Orphan
 F:	drivers/net/wireless/marvell/libertas/
 
 MARVELL MACCHIATOBIN SUPPORT
-M:	Russell King <rmk@armlinux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org
 S:	Maintained
 F:	arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
@@ -8448,7 +8449,7 @@ F: drivers/net/ethernet/marvell/mv643xx_eth.*
 F:	include/linux/mv643xx.h
 
 MARVELL MV88X3310 PHY DRIVER
-M:	Russell King <rmk@armlinux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/phy/marvell10g.c
@@ -12892,6 +12893,19 @@ S: Maintained
 F:	drivers/net/ethernet/socionext/netsec.c
 F:	Documentation/devicetree/bindings/net/socionext-netsec.txt
 
+SOLIDRUN CLEARFOG SUPPORT
+M:	Russell King <linux@armlinux.org.uk>
+S:	Maintained
+F:	arch/arm/boot/dts/armada-388-clearfog*
+F:	arch/arm/boot/dts/armada-38x-solidrun-*
+
+SOLIDRUN CUBOX-I/HUMMINGBOARD SUPPORT
+M:	Russell King <linux@armlinux.org.uk>
+S:	Maintained
+F:	arch/arm/boot/dts/imx6*-cubox-i*
+F:	arch/arm/boot/dts/imx6*-hummingboard*
+F:	arch/arm/boot/dts/imx6*-sr-*
+
 SONIC NETWORK DRIVER
 M:	Thomas Bogendoerfer <tsbogend@alpha.franken.de>
 L:	netdev@vger.kernel.org
@@ -13661,7 +13675,8 @@ S: Supported
 F:	drivers/i2c/busses/i2c-tegra.c
 
 TEGRA IOMMU DRIVERS
-M:	Hiroshi Doyu <hdoyu@nvidia.com>
+M:	Thierry Reding <thierry.reding@gmail.com>
+L:	linux-tegra@vger.kernel.org
 S:	Supported
 F:	drivers/iommu/tegra*
 
diff --git a/Makefile b/Makefile
index 486db374d1c1..9f77440dcdfe 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -487,6 +487,8 @@ CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
 KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
 endif
 
 RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
@@ -743,8 +745,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
 # See modpost pattern 2
 KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
 KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
 else
 
 # These warnings generated too much noise in a regular build.
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 78a647080ebc..199ebc1c4538 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -22,6 +22,7 @@ config ARM_PTDUMP_DEBUGFS
 
 config DEBUG_WX
 	bool "Warn on W+X mappings at boot"
+	depends on MMU
 	select ARM_PTDUMP_CORE
 	---help---
 	  Generate a warning if any W+X mappings are found at boot.
diff --git a/arch/arm/boot/deflate_xip_data.sh b/arch/arm/boot/deflate_xip_data.sh
index 1189598a25eb..5e7d758ebdd6 100755
--- a/arch/arm/boot/deflate_xip_data.sh
+++ b/arch/arm/boot/deflate_xip_data.sh
@@ -30,7 +30,7 @@ esac
 
 sym_val() {
 	# extract hex value for symbol in $1
-	local val=$($NM "$VMLINUX" | sed -n "/ $1$/{s/ .*$//p;q}")
+	local val=$($NM "$VMLINUX" 2>/dev/null | sed -n "/ $1\$/{s/ .*$//p;q}")
 	[ "$val" ] || { echo "can't find $1 in $VMLINUX" 1>&2; exit 1; }
 	# convert from hex to decimal
 	echo $((0x$val))
@@ -48,12 +48,12 @@ data_end=$(($_edata_loc - $base_offset))
 file_end=$(stat -c "%s" "$XIPIMAGE")
 if [ "$file_end" != "$data_end" ]; then
 	printf "end of xipImage doesn't match with _edata_loc (%#x vs %#x)\n" \
-	       $(($file_end + $base_offset)) $_edata_loc 2>&1
+	       $(($file_end + $base_offset)) $_edata_loc 1>&2
 	exit 1;
 fi
 
 # be ready to clean up
-trap 'rm -f "$XIPIMAGE.tmp"' 0 1 2 3
+trap 'rm -f "$XIPIMAGE.tmp"; exit 1' 1 2 3
 
 # substitute the data section by a compressed version
 $DD if="$XIPIMAGE" count=$data_start iflag=count_bytes of="$XIPIMAGE.tmp"
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index b0d8431a3700..ae2b8c952e80 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -42,6 +42,11 @@
 		};
 	};
 
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0x40000000 0>;
+	};
+
 	ahb {
 		compatible = "simple-bus";
 		#address-cells = <1>;
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index 40de3b66c33f..2477ebc11d9d 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -42,6 +42,11 @@
 		};
 	};
 
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0x80000000 0>;
+	};
+
 	ahb {
 		compatible = "simple-bus";
 		#address-cells = <1>;
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index a7a5dc7b2700..e7d2db839d70 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -82,7 +82,7 @@
 		enable-active-high;
 	};
 
-	reg_usb_otg2_vbus: regulator-usb-otg1-vbus {
+	reg_usb_otg2_vbus: regulator-usb-otg2-vbus {
 		compatible = "regulator-fixed";
 		regulator-name = "usb_otg2_vbus";
 		regulator-min-microvolt = <5000000>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 6102e4e7f35c..354aff45c1af 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -927,6 +927,7 @@
 	i2s: i2s@ff890000 {
 		compatible = "rockchip,rk3288-i2s", "rockchip,rk3066-i2s";
 		reg = <0x0 0xff890000 0x0 0x10000>;
+		#sound-dai-cells = <0>;
 		interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -1176,6 +1177,7 @@
 		compatible = "rockchip,rk3288-dw-hdmi";
 		reg = <0x0 0xff980000 0x0 0x20000>;
 		reg-io-width = <4>;
+		#sound-dai-cells = <0>;
 		rockchip,grf = <&grf>;
 		interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>, <&cru SCLK_HDMI_CEC>;
diff --git a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
index 51e6f1d21c32..b2758dd8ce43 100644
--- a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
@@ -42,7 +42,6 @@
 
 /dts-v1/;
 #include "sun6i-a31s.dtsi"
-#include "sunxi-common-regulators.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 
 / {
@@ -99,6 +98,7 @@
 	pinctrl-0 = <&gmac_pins_rgmii_a>, <&gmac_phy_reset_pin_bpi_m2>;
 	phy = <&phy1>;
 	phy-mode = "rgmii";
+	phy-supply = <&reg_dldo1>;
 	snps,reset-gpio = <&pio 0 21 GPIO_ACTIVE_HIGH>; /* PA21 */
 	snps,reset-active-low;
 	snps,reset-delays-us = <0 10000 30000>;
@@ -118,7 +118,7 @@
 &mmc0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_bpi_m2>;
-	vmmc-supply = <&reg_vcc3v0>;
+	vmmc-supply = <&reg_dcdc1>;
 	bus-width = <4>;
 	cd-gpios = <&pio 0 4 GPIO_ACTIVE_HIGH>; /* PA4 */
 	cd-inverted;
@@ -132,7 +132,7 @@
 &mmc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc2_pins_a>;
-	vmmc-supply = <&reg_vcc3v0>;
+	vmmc-supply = <&reg_aldo1>;
 	mmc-pwrseq = <&mmc2_pwrseq>;
 	bus-width = <4>;
 	non-removable;
@@ -163,6 +163,8 @@
 		reg = <0x68>;
 		interrupt-parent = <&nmi_intc>;
 		interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+		eldoin-supply = <&reg_dcdc1>;
+		x-powers,drive-vbus-en;
 	};
 };
 
@@ -193,7 +195,28 @@
 
 #include "axp22x.dtsi"
 
+&reg_aldo1 {
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "vcc-wifi";
+};
+
+&reg_aldo2 {
+	regulator-always-on;
+	regulator-min-microvolt = <2500000>;
+	regulator-max-microvolt = <2500000>;
+	regulator-name = "vcc-gmac";
+};
+
+&reg_aldo3 {
+	regulator-always-on;
+	regulator-min-microvolt = <3000000>;
+	regulator-max-microvolt = <3000000>;
+	regulator-name = "avcc";
+};
+
 &reg_dc5ldo {
+	regulator-always-on;
 	regulator-min-microvolt = <700000>;
 	regulator-max-microvolt = <1320000>;
 	regulator-name = "vdd-cpus";
@@ -233,6 +256,40 @@
 	regulator-name = "vcc-dram";
 };
 
+&reg_dldo1 {
+	regulator-min-microvolt = <3000000>;
+	regulator-max-microvolt = <3000000>;
+	regulator-name = "vcc-mac";
+};
+
+&reg_dldo2 {
+	regulator-min-microvolt = <2800000>;
+	regulator-max-microvolt = <2800000>;
+	regulator-name = "avdd-csi";
+};
+
+&reg_dldo3 {
+	regulator-always-on;
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "vcc-pb";
+};
+
+&reg_eldo1 {
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+	regulator-name = "vdd-csi";
+	status = "okay";
+};
+
+&reg_ldo_io1 {
+	regulator-always-on;
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+	regulator-name = "vcc-pm-cpus";
+	status = "okay";
+};
+
 &uart0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart0_pins_a>;
diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
index 9c99e817535e..5b85889f82ee 100644
--- a/arch/arm/include/asm/vdso.h
+++ b/arch/arm/include/asm/vdso.h
@@ -12,8 +12,6 @@ struct mm_struct;
 
 void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
 
-extern char vdso_start, vdso_end;
-
 extern unsigned int vdso_total_pages;
 
 #else /* CONFIG_VDSO */
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index a4d6dc0f2427..f4dd7f9663c1 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -39,6 +39,8 @@
 
 static struct page **vdso_text_pagelist;
 
+extern char vdso_start[], vdso_end[];
+
 /* Total number of pages needed for the data and text portions of the VDSO. */
 unsigned int vdso_total_pages __ro_after_init;
 
@@ -197,13 +199,13 @@ static int __init vdso_init(void)
 	unsigned int text_pages;
 	int i;
 
-	if (memcmp(&vdso_start, "\177ELF", 4)) {
+	if (memcmp(vdso_start, "\177ELF", 4)) {
 		pr_err("VDSO is not a valid ELF object!\n");
 		return -ENOEXEC;
 	}
 
-	text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
-	pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
+	text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+	pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
 
 	/* Allocate the VDSO text pagelist */
 	vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
@@ -218,7 +220,7 @@ static int __init vdso_init(void)
 	for (i = 0; i < text_pages; i++) {
 		struct page *page;
 
-		page = virt_to_page(&vdso_start + i * PAGE_SIZE);
+		page = virt_to_page(vdso_start + i * PAGE_SIZE);
 		vdso_text_pagelist[i] = page;
 	}
 
@@ -229,7 +231,7 @@ static int __init vdso_init(void)
 
 	cntvct_ok = cntvct_functional();
 
-	patch_vdso(&vdso_start);
+	patch_vdso(vdso_start);
 
 	return 0;
 }
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index a3e78074be70..62eb7d668890 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -127,8 +127,8 @@ static struct gpiod_lookup_table mmc_gpios_table = {
 	.dev_id = "da830-mmc.0",
 	.table = {
 		/* CD: gpio3_12: gpio60: chip 1 contains gpio range 32-63*/
-		GPIO_LOOKUP("davinci_gpio.1", 28, "cd", GPIO_ACTIVE_LOW),
-		GPIO_LOOKUP("davinci_gpio.1", 29, "wp", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("davinci_gpio.0", 28, "cd", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("davinci_gpio.0", 29, "wp", GPIO_ACTIVE_LOW),
 	},
 };
 
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 7e5d7a083707..36cd23c8be9b 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -133,6 +133,9 @@ static void __init u8500_init_machine(void)
 	if (of_machine_is_compatible("st-ericsson,u8540"))
 		of_platform_populate(NULL, u8500_local_bus_nodes,
 				     u8540_auxdata_lookup, NULL);
+	else
+		of_platform_populate(NULL, u8500_local_bus_nodes,
+				     NULL, NULL);
 }
 
 static const char * stericsson_dt_platform_compat[] = {
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index d443e481c3e9..8805a59bae53 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -888,11 +888,8 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 	timer->irq = irq->start;
 	timer->pdev = pdev;
 
-	/* Skip pm_runtime_enable for OMAP1 */
-	if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
-		pm_runtime_enable(dev);
-		pm_runtime_irq_safe(dev);
-	}
+	pm_runtime_enable(dev);
+	pm_runtime_irq_safe(dev);
 
 	if (!timer->reserved) {
 		ret = pm_runtime_get_sync(dev);
diff --git a/arch/arm/plat-omap/include/plat/sram.h b/arch/arm/plat-omap/include/plat/sram.h
index fb061cf0d736..30a07730807a 100644
--- a/arch/arm/plat-omap/include/plat/sram.h
+++ b/arch/arm/plat-omap/include/plat/sram.h
@@ -5,13 +5,4 @@ void omap_map_sram(unsigned long start, unsigned long size,
 		unsigned long skip, int cached);
 void omap_sram_reset(void);
 
-extern void *omap_sram_push_address(unsigned long size);
-
-/* Macro to push a function to the internal SRAM, using the fncpy API */
-#define omap_sram_push(funcp, size) ({				\
-	typeof(&(funcp)) _res = NULL;				\
-	void *_sram_address = omap_sram_push_address(size);	\
-	if (_sram_address)					\
-		_res = fncpy(_sram_address, &(funcp), size);	\
-	_res;							\
-})
+extern void *omap_sram_push(void *funcp, unsigned long size);
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index a5bc92d7e476..921840acf65c 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -23,6 +23,7 @@
 #include <asm/fncpy.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
+#include <asm/set_memory.h>
 
 #include <asm/mach/map.h>
 
@@ -42,7 +43,7 @@ static void __iomem *omap_sram_ceil;
  * Note that fncpy requires the returned address to be aligned
  * to an 8-byte boundary.
  */
-void *omap_sram_push_address(unsigned long size)
+static void *omap_sram_push_address(unsigned long size)
 {
 	unsigned long available, new_ceil = (unsigned long)omap_sram_ceil;
 
@@ -60,6 +61,30 @@ void *omap_sram_push_address(unsigned long size)
 	return (void *)omap_sram_ceil;
 }
 
+void *omap_sram_push(void *funcp, unsigned long size)
+{
+	void *sram;
+	unsigned long base;
+	int pages;
+	void *dst = NULL;
+
+	sram = omap_sram_push_address(size);
+	if (!sram)
+		return NULL;
+
+	base = (unsigned long)sram & PAGE_MASK;
+	pages = PAGE_ALIGN(size) / PAGE_SIZE;
+
+	set_memory_rw(base, pages);
+
+	dst = fncpy(sram, funcp, size);
+
+	set_memory_ro(base, pages);
+	set_memory_x(base, pages);
+
+	return dst;
+}
+
 /*
  * The SRAM context is lost during off-idle and stack
  * needs to be reset.
@@ -75,6 +100,9 @@ void omap_sram_reset(void)
 void __init omap_map_sram(unsigned long start, unsigned long size,
 			unsigned long skip, int cached)
 {
+	unsigned long base;
+	int pages;
+
 	if (size == 0)
 		return;
 
@@ -95,4 +123,10 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
 	 */
 	memset_io(omap_sram_base + omap_sram_skip, 0,
 		  omap_sram_size - omap_sram_skip);
+
+	base = (unsigned long)omap_sram_base;
+	pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE;
+
+	set_memory_ro(base, pages);
+	set_memory_x(base, pages);
 }
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 03c6a3c72f9c..4c375e11ae95 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -648,7 +648,7 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
648 */ 648 */
649static int vfp_dying_cpu(unsigned int cpu) 649static int vfp_dying_cpu(unsigned int cpu)
650{ 650{
651 vfp_force_reload(cpu, current_thread_info()); 651 vfp_current_hw_state[cpu] = NULL;
652 return 0; 652 return 0;
653} 653}
654 654
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
index 03f195025390..204bdb9857b9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
@@ -406,8 +406,9 @@
 	wlan_pd_n: wlan-pd-n {
 		compatible = "regulator-fixed";
 		regulator-name = "wlan_pd_n";
+		pinctrl-names = "default";
+		pinctrl-0 = <&wlan_module_reset_l>;
 
-		/* Note the wlan_module_reset_l pinctrl */
 		enable-active-high;
 		gpio = <&gpio1 11 GPIO_ACTIVE_HIGH>;
 
@@ -983,12 +984,6 @@ ap_i2c_audio: &i2c8 {
 	pinctrl-0 = <
 		&ap_pwroff	/* AP will auto-assert this when in S3 */
 		&clk_32k	/* This pin is always 32k on gru boards */
-
-		/*
-		 * We want this driven low ASAP; firmware should help us, but
-		 * we can help ourselves too.
-		 */
-		&wlan_module_reset_l
 	>;
 
 	pcfg_output_low: pcfg-output-low {
@@ -1168,12 +1163,7 @@ ap_i2c_audio: &i2c8 {
 	};
 
 	wlan_module_reset_l: wlan-module-reset-l {
-		/*
-		 * We want this driven low ASAP (As {Soon,Strongly} As
-		 * Possible), to avoid leakage through the powered-down
-		 * WiFi.
-		 */
-		rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_output_low>;
+		rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_pull_none>;
 	};
 
 	bt_host_wake_l: bt-host-wake-l {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 2605118d4b4c..0b81ca1d07e7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -411,8 +411,8 @@
 			reg = <0x0 0xfe800000 0x0 0x100000>;
 			interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH 0>;
 			dr_mode = "otg";
-			phys = <&u2phy0_otg>, <&tcphy0_usb3>;
-			phy-names = "usb2-phy", "usb3-phy";
+			phys = <&u2phy0_otg>;
+			phy-names = "usb2-phy";
 			phy_type = "utmi_wide";
 			snps,dis_enblslpm_quirk;
 			snps,dis-u2-freeclk-exists-quirk;
@@ -444,8 +444,8 @@
 			reg = <0x0 0xfe900000 0x0 0x100000>;
 			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH 0>;
 			dr_mode = "otg";
-			phys = <&u2phy1_otg>, <&tcphy1_usb3>;
-			phy-names = "usb2-phy", "usb3-phy";
+			phys = <&u2phy1_otg>;
+			phy-names = "usb2-phy";
 			phy_type = "utmi_wide";
 			snps,dis_enblslpm_quirk;
 			snps,dis-u2-freeclk-exists-quirk;
diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig
index 692ae85a3e3d..8e3a1fc2bc39 100644
--- a/arch/mips/lantiq/Kconfig
+++ b/arch/mips/lantiq/Kconfig
@@ -13,6 +13,8 @@ choice
 config SOC_AMAZON_SE
 	bool "Amazon SE"
 	select SOC_TYPE_XWAY
+	select MFD_SYSCON
+	select MFD_CORE
 
 config SOC_XWAY
 	bool "XWAY"
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 52500d3b7004..e0af39b33e28 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -549,9 +549,9 @@ void __init ltq_soc_init(void)
 		clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
 				ltq_ar9_fpi_hz(), CLOCK_250M);
 		clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
-		clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
+		clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
 		clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
-		clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1);
+		clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
 		clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH);
 		clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
 		clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
@@ -560,7 +560,7 @@ void __init ltq_soc_init(void)
 	} else {
 		clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
 				ltq_danube_fpi_hz(), ltq_danube_pp32_hz());
-		clkdev_add_pmu("1f203018.usb2-phy", "ctrl", 1, 0, PMU_USB0);
+		clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
 		clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
 		clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
 		clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c
index 1b274742077d..d2718de60b9b 100644
--- a/arch/mips/ralink/mt7621.c
+++ b/arch/mips/ralink/mt7621.c
@@ -170,6 +170,28 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
 	u32 n1;
 	u32 rev;
 
+	/* Early detection of CMP support */
+	mips_cm_probe();
+	mips_cpc_probe();
+
+	if (mips_cps_numiocu(0)) {
+		/*
+		 * mips_cm_probe() wipes out bootloader
+		 * config for CM regions and we have to configure them
+		 * again. This SoC cannot talk to pamlbus devices
+		 * witout proper iocu region set up.
+		 *
+		 * FIXME: it would be better to do this with values
+		 * from DT, but we need this very early because
+		 * without this we cannot talk to pretty much anything
+		 * including serial.
+		 */
+		write_gcr_reg0_base(MT7621_PALMBUS_BASE);
+		write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE |
+				    CM_GCR_REGn_MASK_CMTGT_IOCU0);
+		__sync();
+	}
+
 	n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
 	n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
 
@@ -194,26 +216,6 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
 
 	rt2880_pinmux_data = mt7621_pinmux_data;
 
-	/* Early detection of CMP support */
-	mips_cm_probe();
-	mips_cpc_probe();
-
-	if (mips_cps_numiocu(0)) {
-		/*
-		 * mips_cm_probe() wipes out bootloader
-		 * config for CM regions and we have to configure them
-		 * again. This SoC cannot talk to pamlbus devices
-		 * witout proper iocu region set up.
-		 *
-		 * FIXME: it would be better to do this with values
-		 * from DT, but we need this very early because
-		 * without this we cannot talk to pretty much anything
-		 * including serial.
-		 */
-		write_gcr_reg0_base(MT7621_PALMBUS_BASE);
-		write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE |
-				    CM_GCR_REGn_MASK_CMTGT_IOCU0);
-	}
 
 	if (!register_cps_smp_ops())
 		return;
diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c
index 64543d66e76b..e9531fea23a2 100644
--- a/arch/mips/ralink/reset.c
+++ b/arch/mips/ralink/reset.c
@@ -96,16 +96,9 @@ static void ralink_restart(char *command)
 	unreachable();
 }
 
-static void ralink_halt(void)
-{
-	local_irq_disable();
-	unreachable();
-}
-
 static int __init mips_reboot_setup(void)
 {
 	_machine_restart = ralink_restart;
-	_machine_halt = ralink_halt;
 
 	return 0;
 }
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 0abeb0e2d616..37671feb2bf6 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -87,6 +87,9 @@ typedef struct {
 	/* Number of bits in the mm_cpumask */
 	atomic_t active_cpus;
 
+	/* Number of users of the external (Nest) MMU */
+	atomic_t copros;
+
 	/* NPU NMMU context */
 	struct npu_context *npu_context;
 
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 8eea90f80e45..19b45ba6caf9 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -47,9 +47,6 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
 #endif
 extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
-extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
-				     unsigned long page_size);
-extern void radix__flush_tlb_lpid(unsigned long lpid);
 extern void radix__flush_tlb_all(void);
 extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
 					unsigned long address);
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index a2c5c95882cf..2e2bacbdf6ed 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -203,6 +203,7 @@ static inline void cpu_feature_keys_init(void) { }
 #define CPU_FTR_DAWR			LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_DABRX			LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG		LONG_ASM_CONST(0x1000000000000000)
+#define CPU_FTR_P9_TLBIE_BUG		LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1		LONG_ASM_CONST(0x4000000000000000)
 #define CPU_FTR_POWER9_DD2_1		LONG_ASM_CONST(0x8000000000000000)
 
@@ -465,7 +466,7 @@ static inline void cpu_feature_keys_init(void) { }
 	    CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
 	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | \
-	    CPU_FTR_PKEY)
+	    CPU_FTR_PKEY | CPU_FTR_P9_TLBIE_BUG)
 #define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
 			     (~CPU_FTR_SAO))
 #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 051b3d63afe3..3a15b6db9501 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -92,15 +92,23 @@ static inline void dec_mm_active_cpus(struct mm_struct *mm)
 static inline void mm_context_add_copro(struct mm_struct *mm)
 {
 	/*
-	 * On hash, should only be called once over the lifetime of
-	 * the context, as we can't decrement the active cpus count
-	 * and flush properly for the time being.
+	 * If any copro is in use, increment the active CPU count
+	 * in order to force TLB invalidations to be global as to
+	 * propagate to the Nest MMU.
 	 */
-	inc_mm_active_cpus(mm);
+	if (atomic_inc_return(&mm->context.copros) == 1)
+		inc_mm_active_cpus(mm);
 }
 
 static inline void mm_context_remove_copro(struct mm_struct *mm)
 {
+	int c;
+
+	c = atomic_dec_if_positive(&mm->context.copros);
+
+	/* Detect imbalance between add and remove */
+	WARN_ON(c < 0);
+
 	/*
 	 * Need to broadcast a global flush of the full mm before
 	 * decrementing active_cpus count, as the next TLBI may be
@@ -111,7 +119,7 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
111 * for the time being. Invalidations will remain global if 119 * for the time being. Invalidations will remain global if
112 * used on hash. 120 * used on hash.
113 */ 121 */
114 if (radix_enabled()) { 122 if (c == 0 && radix_enabled()) {
115 flush_all_mm(mm); 123 flush_all_mm(mm);
116 dec_mm_active_cpus(mm); 124 dec_mm_active_cpus(mm);
117 } 125 }
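
The copros counter introduced above turns mm_context_add_copro()/mm_context_remove_copro() into a reference count: only the first coprocessor attach bumps the active-CPU count (so TLB invalidations go global and reach the Nest MMU), and only the last detach flushes the mm and drops it again. A user-space model of that pairing, using C11 atomics rather than the kernel's atomic_t; this is illustrative only, and the kernel's atomic_dec_if_positive() underflow guard is only approximated here:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int copros;       /* models mm->context.copros */
static atomic_int active_cpus;  /* models mm->context.active_cpus */

static void add_copro(void)
{
        /* Only the first attach escalates invalidations to global. */
        if (atomic_fetch_add(&copros, 1) + 1 == 1)
                atomic_fetch_add(&active_cpus, 1);
}

static void remove_copro(void)
{
        int c = atomic_fetch_sub(&copros, 1) - 1;

        if (c < 0) {
                /* imbalance between add and remove */
                atomic_fetch_add(&copros, 1);
                fprintf(stderr, "copro count underflow\n");
                return;
        }
        if (c == 0) {
                /* last user gone: the full-mm flush happens here, then... */
                atomic_fetch_sub(&active_cpus, 1);
        }
}

int main(void)
{
        add_copro();
        add_copro();
        remove_copro();
        remove_copro();
        printf("copros=%d active_cpus=%d\n",
               atomic_load(&copros), atomic_load(&active_cpus));
        return 0;
}
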
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 945e2c29ad2d..8ca5d5b74618 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -709,6 +709,9 @@ static __init void cpufeatures_cpu_quirks(void)
709 cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1; 709 cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
710 else if ((version & 0xffffefff) == 0x004e0201) 710 else if ((version & 0xffffefff) == 0x004e0201)
711 cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; 711 cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
712
713 if ((version & 0xffff0000) == 0x004e0000)
714 cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
712} 715}
713 716
714static void __init cpufeatures_setup_finished(void) 717static void __init cpufeatures_setup_finished(void)
@@ -720,6 +723,9 @@ static void __init cpufeatures_setup_finished(void)
720 cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE; 723 cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
721 } 724 }
722 725
726 /* Make sure powerpc_base_platform is non-NULL */
727 powerpc_base_platform = cur_cpu_spec->platform;
728
723 system_registers.lpcr = mfspr(SPRN_LPCR); 729 system_registers.lpcr = mfspr(SPRN_LPCR);
724 system_registers.hfscr = mfspr(SPRN_HFSCR); 730 system_registers.hfscr = mfspr(SPRN_HFSCR);
725 system_registers.fscr = mfspr(SPRN_FSCR); 731 system_registers.fscr = mfspr(SPRN_FSCR);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 3ac87e53b3da..1ecfd8ffb098 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -706,7 +706,7 @@ EXC_COMMON_BEGIN(bad_addr_slb)
706 ld r3, PACA_EXSLB+EX_DAR(r13) 706 ld r3, PACA_EXSLB+EX_DAR(r13)
707 std r3, _DAR(r1) 707 std r3, _DAR(r1)
708 beq cr6, 2f 708 beq cr6, 2f
709 li r10, 0x480 /* fix trap number for I-SLB miss */ 709 li r10, 0x481 /* fix trap number for I-SLB miss */
710 std r10, _TRAP(r1) 710 std r10, _TRAP(r1)
7112: bl save_nvgprs 7112: bl save_nvgprs
712 addi r3, r1, STACK_FRAME_OVERHEAD 712 addi r3, r1, STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index f88038847790..061aa0f47bb1 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -476,6 +476,14 @@ void force_external_irq_replay(void)
476 */ 476 */
477 WARN_ON(!arch_irqs_disabled()); 477 WARN_ON(!arch_irqs_disabled());
478 478
479 /*
480 * Interrupts must always be hard disabled before irq_happened is
481 * modified (to prevent lost update in case of interrupt between
482 * load and store).
483 */
484 __hard_irq_disable();
485 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
486
479 /* Indicate in the PACA that we have an interrupt to replay */ 487 /* Indicate in the PACA that we have an interrupt to replay */
480 local_paca->irq_happened |= PACA_IRQ_EE; 488 local_paca->irq_happened |= PACA_IRQ_EE;
481} 489}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 5cb4e4687107..5d9bafe9a371 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -157,6 +157,9 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
157 asm volatile("ptesync": : :"memory"); 157 asm volatile("ptesync": : :"memory");
158 asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1) 158 asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
159 : : "r" (addr), "r" (kvm->arch.lpid) : "memory"); 159 : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
160 if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG))
161 asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
162 : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
160 asm volatile("ptesync": : :"memory"); 163 asm volatile("ptesync": : :"memory");
161} 164}
162 165
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 8888e625a999..e1c083fbe434 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -473,6 +473,17 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
473 trace_tlbie(kvm->arch.lpid, 0, rbvalues[i], 473 trace_tlbie(kvm->arch.lpid, 0, rbvalues[i],
474 kvm->arch.lpid, 0, 0, 0); 474 kvm->arch.lpid, 0, 0, 0);
475 } 475 }
476
477 if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
478 /*
479 * Need the extra ptesync to make sure we don't
480 * re-order the tlbie
481 */
482 asm volatile("ptesync": : :"memory");
483 asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
484 "r" (rbvalues[0]), "r" (kvm->arch.lpid));
485 }
486
476 asm volatile("eieio; tlbsync; ptesync" : : : "memory"); 487 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
477 kvm->arch.tlbie_lock = 0; 488 kvm->arch.tlbie_lock = 0;
478 } else { 489 } else {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index d33264697a31..f86a20270e50 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1557,6 +1557,24 @@ mc_cont:
1557 ptesync 1557 ptesync
15583: stw r5,VCPU_SLB_MAX(r9) 15583: stw r5,VCPU_SLB_MAX(r9)
1559 1559
1560 /* load host SLB entries */
1561BEGIN_MMU_FTR_SECTION
1562 b 0f
1563END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
1564 ld r8,PACA_SLBSHADOWPTR(r13)
1565
1566 .rept SLB_NUM_BOLTED
1567 li r3, SLBSHADOW_SAVEAREA
1568 LDX_BE r5, r8, r3
1569 addi r3, r3, 8
1570 LDX_BE r6, r8, r3
1571 andis. r7,r5,SLB_ESID_V@h
1572 beq 1f
1573 slbmte r6,r5
15741: addi r8,r8,16
1575 .endr
15760:
1577
1560guest_bypass: 1578guest_bypass:
1561 stw r12, STACK_SLOT_TRAP(r1) 1579 stw r12, STACK_SLOT_TRAP(r1)
1562 mr r3, r12 1580 mr r3, r12
@@ -2018,23 +2036,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2018 mtspr SPRN_LPCR,r8 2036 mtspr SPRN_LPCR,r8
2019 isync 2037 isync
202048: 203848:
2021 /* load host SLB entries */
2022BEGIN_MMU_FTR_SECTION
2023 b 0f
2024END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
2025 ld r8,PACA_SLBSHADOWPTR(r13)
2026
2027 .rept SLB_NUM_BOLTED
2028 li r3, SLBSHADOW_SAVEAREA
2029 LDX_BE r5, r8, r3
2030 addi r3, r3, 8
2031 LDX_BE r6, r8, r3
2032 andis. r7,r5,SLB_ESID_V@h
2033 beq 1f
2034 slbmte r6,r5
20351: addi r8,r8,16
2036 .endr
20370:
2038#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 2039#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2039 /* Finish timing, if we have a vcpu */ 2040 /* Finish timing, if we have a vcpu */
2040 ld r4, HSTATE_KVM_VCPU(r13) 2041 ld r4, HSTATE_KVM_VCPU(r13)
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index a0675e91ad7d..656933c85925 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -201,6 +201,15 @@ static inline unsigned long ___tlbie(unsigned long vpn, int psize,
201 return va; 201 return va;
202} 202}
203 203
204static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
205{
206 if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
207 /* Need the extra ptesync to ensure we don't reorder tlbie*/
208 asm volatile("ptesync": : :"memory");
209 ___tlbie(vpn, psize, apsize, ssize);
210 }
211}
212
204static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) 213static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
205{ 214{
206 unsigned long rb; 215 unsigned long rb;
@@ -278,6 +287,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
278 asm volatile("ptesync": : :"memory"); 287 asm volatile("ptesync": : :"memory");
279 } else { 288 } else {
280 __tlbie(vpn, psize, apsize, ssize); 289 __tlbie(vpn, psize, apsize, ssize);
290 fixup_tlbie(vpn, psize, apsize, ssize);
281 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 291 asm volatile("eieio; tlbsync; ptesync": : :"memory");
282 } 292 }
283 if (lock_tlbie && !use_local) 293 if (lock_tlbie && !use_local)
@@ -771,7 +781,7 @@ static void native_hpte_clear(void)
771 */ 781 */
772static void native_flush_hash_range(unsigned long number, int local) 782static void native_flush_hash_range(unsigned long number, int local)
773{ 783{
774 unsigned long vpn; 784 unsigned long vpn = 0;
775 unsigned long hash, index, hidx, shift, slot; 785 unsigned long hash, index, hidx, shift, slot;
776 struct hash_pte *hptep; 786 struct hash_pte *hptep;
777 unsigned long hpte_v; 787 unsigned long hpte_v;
@@ -843,6 +853,10 @@ static void native_flush_hash_range(unsigned long number, int local)
843 __tlbie(vpn, psize, psize, ssize); 853 __tlbie(vpn, psize, psize, ssize);
844 } pte_iterate_hashed_end(); 854 } pte_iterate_hashed_end();
845 } 855 }
856 /*
857 * Just do one more with the last used values.
858 */
859 fixup_tlbie(vpn, psize, psize, ssize);
846 asm volatile("eieio; tlbsync; ptesync":::"memory"); 860 asm volatile("eieio; tlbsync; ptesync":::"memory");
847 861
848 if (lock_tlbie) 862 if (lock_tlbie)
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 929d9ef7083f..3f980baade4c 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -173,6 +173,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
173 mm_iommu_init(mm); 173 mm_iommu_init(mm);
174#endif 174#endif
175 atomic_set(&mm->context.active_cpus, 0); 175 atomic_set(&mm->context.active_cpus, 0);
176 atomic_set(&mm->context.copros, 0);
176 177
177 return 0; 178 return 0;
178} 179}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 28c980eb4422..adf469f312f2 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -481,6 +481,7 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
481 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); 481 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
482 trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0); 482 trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
483 } 483 }
484 /* do we need fixup here ?*/
484 asm volatile("eieio; tlbsync; ptesync" : : : "memory"); 485 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
485} 486}
486EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry); 487EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 71d1b19ad1c0..a07f5372a4bf 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -119,6 +119,49 @@ static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
119 trace_tlbie(0, 0, rb, rs, ric, prs, r); 119 trace_tlbie(0, 0, rb, rs, ric, prs, r);
120} 120}
121 121
122static inline void __tlbiel_va(unsigned long va, unsigned long pid,
123 unsigned long ap, unsigned long ric)
124{
125 unsigned long rb,rs,prs,r;
126
127 rb = va & ~(PPC_BITMASK(52, 63));
128 rb |= ap << PPC_BITLSHIFT(58);
129 rs = pid << PPC_BITLSHIFT(31);
130 prs = 1; /* process scoped */
131 r = 1; /* raidx format */
132
133 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
134 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
135 trace_tlbie(0, 1, rb, rs, ric, prs, r);
136}
137
138static inline void __tlbie_va(unsigned long va, unsigned long pid,
139 unsigned long ap, unsigned long ric)
140{
141 unsigned long rb,rs,prs,r;
142
143 rb = va & ~(PPC_BITMASK(52, 63));
144 rb |= ap << PPC_BITLSHIFT(58);
145 rs = pid << PPC_BITLSHIFT(31);
146 prs = 1; /* process scoped */
147 r = 1; /* raidx format */
148
149 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
150 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
151 trace_tlbie(0, 0, rb, rs, ric, prs, r);
152}
153
154static inline void fixup_tlbie(void)
155{
156 unsigned long pid = 0;
157 unsigned long va = ((1UL << 52) - 1);
158
159 if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
160 asm volatile("ptesync": : :"memory");
161 __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
162 }
163}
164
122/* 165/*
123 * We use 128 set in radix mode and 256 set in hpt mode. 166 * We use 128 set in radix mode and 256 set in hpt mode.
124 */ 167 */
@@ -151,24 +194,25 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
151static inline void _tlbie_pid(unsigned long pid, unsigned long ric) 194static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
152{ 195{
153 asm volatile("ptesync": : :"memory"); 196 asm volatile("ptesync": : :"memory");
154 __tlbie_pid(pid, ric);
155 asm volatile("eieio; tlbsync; ptesync": : :"memory");
156}
157 197
158static inline void __tlbiel_va(unsigned long va, unsigned long pid, 198 /*
159 unsigned long ap, unsigned long ric) 199 * Workaround the fact that the "ric" argument to __tlbie_pid
160{ 200 * must be a compile-time contraint to match the "i" constraint
161 unsigned long rb,rs,prs,r; 201 * in the asm statement.
162 202 */
163 rb = va & ~(PPC_BITMASK(52, 63)); 203 switch (ric) {
164 rb |= ap << PPC_BITLSHIFT(58); 204 case RIC_FLUSH_TLB:
165 rs = pid << PPC_BITLSHIFT(31); 205 __tlbie_pid(pid, RIC_FLUSH_TLB);
166 prs = 1; /* process scoped */ 206 break;
167 r = 1; /* raidx format */ 207 case RIC_FLUSH_PWC:
168 208 __tlbie_pid(pid, RIC_FLUSH_PWC);
169 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) 209 break;
170 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); 210 case RIC_FLUSH_ALL:
171 trace_tlbie(0, 1, rb, rs, ric, prs, r); 211 default:
212 __tlbie_pid(pid, RIC_FLUSH_ALL);
213 }
214 fixup_tlbie();
215 asm volatile("eieio; tlbsync; ptesync": : :"memory");
172} 216}
173 217
174static inline void __tlbiel_va_range(unsigned long start, unsigned long end, 218static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
@@ -203,22 +247,6 @@ static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
203 asm volatile("ptesync": : :"memory"); 247 asm volatile("ptesync": : :"memory");
204} 248}
205 249
206static inline void __tlbie_va(unsigned long va, unsigned long pid,
207 unsigned long ap, unsigned long ric)
208{
209 unsigned long rb,rs,prs,r;
210
211 rb = va & ~(PPC_BITMASK(52, 63));
212 rb |= ap << PPC_BITLSHIFT(58);
213 rs = pid << PPC_BITLSHIFT(31);
214 prs = 1; /* process scoped */
215 r = 1; /* raidx format */
216
217 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
218 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
219 trace_tlbie(0, 0, rb, rs, ric, prs, r);
220}
221
222static inline void __tlbie_va_range(unsigned long start, unsigned long end, 250static inline void __tlbie_va_range(unsigned long start, unsigned long end,
223 unsigned long pid, unsigned long page_size, 251 unsigned long pid, unsigned long page_size,
224 unsigned long psize) 252 unsigned long psize)
@@ -237,6 +265,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
237 265
238 asm volatile("ptesync": : :"memory"); 266 asm volatile("ptesync": : :"memory");
239 __tlbie_va(va, pid, ap, ric); 267 __tlbie_va(va, pid, ap, ric);
268 fixup_tlbie();
240 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 269 asm volatile("eieio; tlbsync; ptesync": : :"memory");
241} 270}
242 271
@@ -248,6 +277,7 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
248 if (also_pwc) 277 if (also_pwc)
249 __tlbie_pid(pid, RIC_FLUSH_PWC); 278 __tlbie_pid(pid, RIC_FLUSH_PWC);
250 __tlbie_va_range(start, end, pid, page_size, psize); 279 __tlbie_va_range(start, end, pid, page_size, psize);
280 fixup_tlbie();
251 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 281 asm volatile("eieio; tlbsync; ptesync": : :"memory");
252} 282}
253 283
@@ -311,6 +341,16 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd
311} 341}
312EXPORT_SYMBOL(radix__local_flush_tlb_page); 342EXPORT_SYMBOL(radix__local_flush_tlb_page);
313 343
344static bool mm_needs_flush_escalation(struct mm_struct *mm)
345{
346 /*
347 * P9 nest MMU has issues with the page walk cache
348 * caching PTEs and not flushing them properly when
349 * RIC = 0 for a PID/LPID invalidate
350 */
351 return atomic_read(&mm->context.copros) != 0;
352}
353
314#ifdef CONFIG_SMP 354#ifdef CONFIG_SMP
315void radix__flush_tlb_mm(struct mm_struct *mm) 355void radix__flush_tlb_mm(struct mm_struct *mm)
316{ 356{
@@ -321,9 +361,12 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
321 return; 361 return;
322 362
323 preempt_disable(); 363 preempt_disable();
324 if (!mm_is_thread_local(mm)) 364 if (!mm_is_thread_local(mm)) {
325 _tlbie_pid(pid, RIC_FLUSH_TLB); 365 if (mm_needs_flush_escalation(mm))
326 else 366 _tlbie_pid(pid, RIC_FLUSH_ALL);
367 else
368 _tlbie_pid(pid, RIC_FLUSH_TLB);
369 } else
327 _tlbiel_pid(pid, RIC_FLUSH_TLB); 370 _tlbiel_pid(pid, RIC_FLUSH_TLB);
328 preempt_enable(); 371 preempt_enable();
329} 372}
@@ -435,10 +478,14 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
435 } 478 }
436 479
437 if (full) { 480 if (full) {
438 if (local) 481 if (local) {
439 _tlbiel_pid(pid, RIC_FLUSH_TLB); 482 _tlbiel_pid(pid, RIC_FLUSH_TLB);
440 else 483 } else {
441 _tlbie_pid(pid, RIC_FLUSH_TLB); 484 if (mm_needs_flush_escalation(mm))
485 _tlbie_pid(pid, RIC_FLUSH_ALL);
486 else
487 _tlbie_pid(pid, RIC_FLUSH_TLB);
488 }
442 } else { 489 } else {
443 bool hflush = false; 490 bool hflush = false;
444 unsigned long hstart, hend; 491 unsigned long hstart, hend;
@@ -465,6 +512,7 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
465 if (hflush) 512 if (hflush)
466 __tlbie_va_range(hstart, hend, pid, 513 __tlbie_va_range(hstart, hend, pid,
467 HPAGE_PMD_SIZE, MMU_PAGE_2M); 514 HPAGE_PMD_SIZE, MMU_PAGE_2M);
515 fixup_tlbie();
468 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 516 asm volatile("eieio; tlbsync; ptesync": : :"memory");
469 } 517 }
470 } 518 }
@@ -548,6 +596,9 @@ static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
548 } 596 }
549 597
550 if (full) { 598 if (full) {
599 if (!local && mm_needs_flush_escalation(mm))
600 also_pwc = true;
601
551 if (local) 602 if (local)
552 _tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB); 603 _tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
553 else 604 else
@@ -603,46 +654,6 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
603} 654}
604#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 655#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
605 656
606void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
607 unsigned long page_size)
608{
609 unsigned long rb,rs,prs,r;
610 unsigned long ap;
611 unsigned long ric = RIC_FLUSH_TLB;
612
613 ap = mmu_get_ap(radix_get_mmu_psize(page_size));
614 rb = gpa & ~(PPC_BITMASK(52, 63));
615 rb |= ap << PPC_BITLSHIFT(58);
616 rs = lpid & ((1UL << 32) - 1);
617 prs = 0; /* process scoped */
618 r = 1; /* raidx format */
619
620 asm volatile("ptesync": : :"memory");
621 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
622 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
623 asm volatile("eieio; tlbsync; ptesync": : :"memory");
624 trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
625}
626EXPORT_SYMBOL(radix__flush_tlb_lpid_va);
627
628void radix__flush_tlb_lpid(unsigned long lpid)
629{
630 unsigned long rb,rs,prs,r;
631 unsigned long ric = RIC_FLUSH_ALL;
632
633 rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
634 rs = lpid & ((1UL << 32) - 1);
635 prs = 0; /* partition scoped */
636 r = 1; /* raidx format */
637
638 asm volatile("ptesync": : :"memory");
639 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
640 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
641 asm volatile("eieio; tlbsync; ptesync": : :"memory");
642 trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
643}
644EXPORT_SYMBOL(radix__flush_tlb_lpid);
645
646void radix__flush_pmd_tlb_range(struct vm_area_struct *vma, 657void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
647 unsigned long start, unsigned long end) 658 unsigned long start, unsigned long end)
648{ 659{
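
The CPU_FTR_P9_TLBIE_BUG handling added in hash_native_64.c, book3s_hv_rm_mmu.c and here in tlb-radix.c has the same shape everywhere: after the last real tlbie of a sequence, issue an extra ptesync plus one more tlbie before the closing eieio; tlbsync; ptesync, so the earlier tlbie cannot be reordered. A compilable user-space model of that control flow, with the barriers and the instruction stubbed out as prints; this is a sketch of the pattern, not the kernel code:

#include <stdbool.h>
#include <stdio.h>

static bool p9_tlbie_bug = true;        /* stands in for CPU_FTR_P9_TLBIE_BUG */

static void ptesync(void)  { puts("ptesync"); }
static void tlbie(unsigned long va, unsigned long pid)
{
        printf("tlbie va=%#lx pid=%lu\n", va, pid);
}

/* Mirrors fixup_tlbie(): one extra, ordered tlbie after the real ones. */
static void fixup_tlbie(void)
{
        if (p9_tlbie_bug) {
                ptesync();
                tlbie((1UL << 52) - 1, 0);      /* dummy va/pid, as in the patch */
        }
}

static void flush_one(unsigned long va, unsigned long pid)
{
        ptesync();
        tlbie(va, pid);
        fixup_tlbie();                          /* before the closing barriers */
        puts("eieio; tlbsync; ptesync");
}

int main(void)
{
        flush_one(0x1000, 7);
        return 0;
}
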
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 8b8d2297d486..638411f22267 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -315,19 +315,6 @@ config X86_L1_CACHE_SHIFT
315 default "4" if MELAN || M486 || MGEODEGX1 315 default "4" if MELAN || M486 || MGEODEGX1
316 default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX 316 default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
317 317
318config X86_PPRO_FENCE
319 bool "PentiumPro memory ordering errata workaround"
320 depends on M686 || M586MMX || M586TSC || M586 || M486 || MGEODEGX1
321 ---help---
322 Old PentiumPro multiprocessor systems had errata that could cause
323 memory operations to violate the x86 ordering standard in rare cases.
324 Enabling this option will attempt to work around some (but not all)
325 occurrences of this problem, at the cost of much heavier spinlock and
326 memory barrier operations.
327
328 If unsure, say n here. Even distro kernels should think twice before
329 enabling this: there are few systems, and an unlikely bug.
330
331config X86_F00F_BUG 318config X86_F00F_BUG
332 def_bool y 319 def_bool y
333 depends on M586MMX || M586TSC || M586 || M486 320 depends on M586MMX || M586TSC || M586 || M486
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 498c1b812300..1c4d012550ec 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -223,6 +223,15 @@ KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr)
223 223
224LDFLAGS := -m elf_$(UTS_MACHINE) 224LDFLAGS := -m elf_$(UTS_MACHINE)
225 225
226#
227# The 64-bit kernel must be aligned to 2MB. Pass -z max-page-size=0x200000 to
228# the linker to force 2MB page size regardless of the default page size used
229# by the linker.
230#
231ifdef CONFIG_X86_64
232LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
233endif
234
226# Speed up the build 235# Speed up the build
227KBUILD_CFLAGS += -pipe 236KBUILD_CFLAGS += -pipe
228# Workaround for a gcc prelease that unfortunately was shipped in a suse release 237# Workaround for a gcc prelease that unfortunately was shipped in a suse release
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 98761a1576ce..252fee320816 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -309,6 +309,10 @@ static void parse_elf(void *output)
309 309
310 switch (phdr->p_type) { 310 switch (phdr->p_type) {
311 case PT_LOAD: 311 case PT_LOAD:
312#ifdef CONFIG_X86_64
313 if ((phdr->p_align % 0x200000) != 0)
314 error("Alignment of LOAD segment isn't multiple of 2MB");
315#endif
312#ifdef CONFIG_RELOCATABLE 316#ifdef CONFIG_RELOCATABLE
313 dest = output; 317 dest = output;
314 dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); 318 dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 805f52703ee3..18ed349b4f83 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1138,7 +1138,7 @@ apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \
1138#endif /* CONFIG_HYPERV */ 1138#endif /* CONFIG_HYPERV */
1139 1139
1140idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK 1140idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
1141idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK 1141idtentry int3 do_int3 has_error_code=0
1142idtentry stack_segment do_stack_segment has_error_code=1 1142idtentry stack_segment do_stack_segment has_error_code=1
1143 1143
1144#ifdef CONFIG_XEN 1144#ifdef CONFIG_XEN
diff --git a/arch/x86/entry/vdso/vdso32/vclock_gettime.c b/arch/x86/entry/vdso/vdso32/vclock_gettime.c
index 7780bbfb06ef..9242b28418d5 100644
--- a/arch/x86/entry/vdso/vdso32/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vdso32/vclock_gettime.c
@@ -5,8 +5,6 @@
5#undef CONFIG_OPTIMIZE_INLINING 5#undef CONFIG_OPTIMIZE_INLINING
6#endif 6#endif
7 7
8#undef CONFIG_X86_PPRO_FENCE
9
10#ifdef CONFIG_X86_64 8#ifdef CONFIG_X86_64
11 9
12/* 10/*
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 8560ef68a9d6..317be365bce3 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -347,7 +347,7 @@ void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
347 set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER)); 347 set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
348 p4d = p4d_offset(pgd, VSYSCALL_ADDR); 348 p4d = p4d_offset(pgd, VSYSCALL_ADDR);
349#if CONFIG_PGTABLE_LEVELS >= 5 349#if CONFIG_PGTABLE_LEVELS >= 5
350 p4d->p4d |= _PAGE_USER; 350 set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
351#endif 351#endif
352 pud = pud_offset(p4d, VSYSCALL_ADDR); 352 pud = pud_offset(p4d, VSYSCALL_ADDR);
353 set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER)); 353 set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 140d33288e78..88797c80b3e0 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2118,7 +2118,8 @@ static int x86_pmu_event_init(struct perf_event *event)
2118 event->destroy(event); 2118 event->destroy(event);
2119 } 2119 }
2120 2120
2121 if (READ_ONCE(x86_pmu.attr_rdpmc)) 2121 if (READ_ONCE(x86_pmu.attr_rdpmc) &&
2122 !(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
2122 event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; 2123 event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
2123 2124
2124 return err; 2125 return err;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 56457cb73448..1e41d7508d99 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2952,9 +2952,9 @@ static void intel_pebs_aliases_skl(struct perf_event *event)
2952 return intel_pebs_aliases_precdist(event); 2952 return intel_pebs_aliases_precdist(event);
2953} 2953}
2954 2954
2955static unsigned long intel_pmu_free_running_flags(struct perf_event *event) 2955static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
2956{ 2956{
2957 unsigned long flags = x86_pmu.free_running_flags; 2957 unsigned long flags = x86_pmu.large_pebs_flags;
2958 2958
2959 if (event->attr.use_clockid) 2959 if (event->attr.use_clockid)
2960 flags &= ~PERF_SAMPLE_TIME; 2960 flags &= ~PERF_SAMPLE_TIME;
@@ -2976,8 +2976,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
2976 if (!event->attr.freq) { 2976 if (!event->attr.freq) {
2977 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; 2977 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
2978 if (!(event->attr.sample_type & 2978 if (!(event->attr.sample_type &
2979 ~intel_pmu_free_running_flags(event))) 2979 ~intel_pmu_large_pebs_flags(event)))
2980 event->hw.flags |= PERF_X86_EVENT_FREERUNNING; 2980 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
2981 } 2981 }
2982 if (x86_pmu.pebs_aliases) 2982 if (x86_pmu.pebs_aliases)
2983 x86_pmu.pebs_aliases(event); 2983 x86_pmu.pebs_aliases(event);
@@ -3194,7 +3194,7 @@ static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
3194 X86_CONFIG(.event=0xc0, .umask=0x01)) { 3194 X86_CONFIG(.event=0xc0, .umask=0x01)) {
3195 if (left < 128) 3195 if (left < 128)
3196 left = 128; 3196 left = 128;
3197 left &= ~0x3fu; 3197 left &= ~0x3fULL;
3198 } 3198 }
3199 return left; 3199 return left;
3200} 3200}
@@ -3460,7 +3460,7 @@ static __initconst const struct x86_pmu core_pmu = {
3460 .event_map = intel_pmu_event_map, 3460 .event_map = intel_pmu_event_map,
3461 .max_events = ARRAY_SIZE(intel_perfmon_event_map), 3461 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3462 .apic = 1, 3462 .apic = 1,
3463 .free_running_flags = PEBS_FREERUNNING_FLAGS, 3463 .large_pebs_flags = LARGE_PEBS_FLAGS,
3464 3464
3465 /* 3465 /*
3466 * Intel PMCs cannot be accessed sanely above 32-bit width, 3466 * Intel PMCs cannot be accessed sanely above 32-bit width,
@@ -3502,7 +3502,7 @@ static __initconst const struct x86_pmu intel_pmu = {
3502 .event_map = intel_pmu_event_map, 3502 .event_map = intel_pmu_event_map,
3503 .max_events = ARRAY_SIZE(intel_perfmon_event_map), 3503 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3504 .apic = 1, 3504 .apic = 1,
3505 .free_running_flags = PEBS_FREERUNNING_FLAGS, 3505 .large_pebs_flags = LARGE_PEBS_FLAGS,
3506 /* 3506 /*
3507 * Intel PMCs cannot be accessed sanely above 32 bit width, 3507 * Intel PMCs cannot be accessed sanely above 32 bit width,
3508 * so we install an artificial 1<<31 period regardless of 3508 * so we install an artificial 1<<31 period regardless of
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 18c25ab28557..d8015235ba76 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -935,7 +935,7 @@ void intel_pmu_pebs_add(struct perf_event *event)
935 bool needed_cb = pebs_needs_sched_cb(cpuc); 935 bool needed_cb = pebs_needs_sched_cb(cpuc);
936 936
937 cpuc->n_pebs++; 937 cpuc->n_pebs++;
938 if (hwc->flags & PERF_X86_EVENT_FREERUNNING) 938 if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
939 cpuc->n_large_pebs++; 939 cpuc->n_large_pebs++;
940 940
941 pebs_update_state(needed_cb, cpuc, event->ctx->pmu); 941 pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
@@ -975,7 +975,7 @@ void intel_pmu_pebs_del(struct perf_event *event)
975 bool needed_cb = pebs_needs_sched_cb(cpuc); 975 bool needed_cb = pebs_needs_sched_cb(cpuc);
976 976
977 cpuc->n_pebs--; 977 cpuc->n_pebs--;
978 if (hwc->flags & PERF_X86_EVENT_FREERUNNING) 978 if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
979 cpuc->n_large_pebs--; 979 cpuc->n_large_pebs--;
980 980
981 pebs_update_state(needed_cb, cpuc, event->ctx->pmu); 981 pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
@@ -1530,7 +1530,7 @@ void __init intel_ds_init(void)
1530 x86_pmu.pebs_record_size = 1530 x86_pmu.pebs_record_size =
1531 sizeof(struct pebs_record_skl); 1531 sizeof(struct pebs_record_skl);
1532 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; 1532 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
1533 x86_pmu.free_running_flags |= PERF_SAMPLE_TIME; 1533 x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
1534 break; 1534 break;
1535 1535
1536 default: 1536 default:
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 22ec65bc033a..c98b943e58b4 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3343,6 +3343,7 @@ static struct extra_reg skx_uncore_cha_extra_regs[] = {
3343 SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4), 3343 SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3344 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8), 3344 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3345 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8), 3345 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3346 SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3346 EVENT_EXTRA_END 3347 EVENT_EXTRA_END
3347}; 3348};
3348 3349
@@ -3562,24 +3563,27 @@ static struct intel_uncore_type *skx_msr_uncores[] = {
3562 NULL, 3563 NULL,
3563}; 3564};
3564 3565
3566/*
3567 * To determine the number of CHAs, it should read bits 27:0 in the CAPID6
3568 * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
3569 */
3570#define SKX_CAPID6 0x9c
3571#define SKX_CHA_BIT_MASK GENMASK(27, 0)
3572
3565static int skx_count_chabox(void) 3573static int skx_count_chabox(void)
3566{ 3574{
3567 struct pci_dev *chabox_dev = NULL; 3575 struct pci_dev *dev = NULL;
3568 int bus, count = 0; 3576 u32 val = 0;
3569 3577
3570 while (1) { 3578 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
3571 chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev); 3579 if (!dev)
3572 if (!chabox_dev) 3580 goto out;
3573 break;
3574 if (count == 0)
3575 bus = chabox_dev->bus->number;
3576 if (bus != chabox_dev->bus->number)
3577 break;
3578 count++;
3579 }
3580 3581
3581 pci_dev_put(chabox_dev); 3582 pci_read_config_dword(dev, SKX_CAPID6, &val);
3582 return count; 3583 val &= SKX_CHA_BIT_MASK;
3584out:
3585 pci_dev_put(dev);
3586 return hweight32(val);
3583} 3587}
3584 3588
3585void skx_uncore_cpu_init(void) 3589void skx_uncore_cpu_init(void)
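
Rather than walking CHA PCI devices on one bus, the rewritten skx_count_chabox() reads bits 27:0 of the CAPID6 register (device 30, function 3, offset 0x9C on PCI ID 0x2083) and counts the set bits. The arithmetic on its own, in standalone C with an example register value instead of a real config-space read:

#include <stdint.h>
#include <stdio.h>

#define SKX_CHA_BIT_MASK 0x0fffffffu    /* GENMASK(27, 0) */

static unsigned int count_chaboxes(uint32_t capid6)
{
        uint32_t val = capid6 & SKX_CHA_BIT_MASK;
        unsigned int count = 0;

        while (val) {                   /* popcount, as hweight32() does */
                val &= val - 1;
                count++;
        }
        return count;
}

int main(void)
{
        /* A part exposing 28 CHAs would report 28 set bits. */
        printf("%u CHAs\n", count_chaboxes(0x0fffffff));
        return 0;
}
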
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 78f91ec1056e..39cd0615f04f 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -69,7 +69,7 @@ struct event_constraint {
69#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */ 69#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */
70#define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */ 70#define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */
71#define PERF_X86_EVENT_AUTO_RELOAD 0x0400 /* use PEBS auto-reload */ 71#define PERF_X86_EVENT_AUTO_RELOAD 0x0400 /* use PEBS auto-reload */
72#define PERF_X86_EVENT_FREERUNNING 0x0800 /* use freerunning PEBS */ 72#define PERF_X86_EVENT_LARGE_PEBS 0x0800 /* use large PEBS */
73 73
74 74
75struct amd_nb { 75struct amd_nb {
@@ -88,7 +88,7 @@ struct amd_nb {
88 * REGS_USER can be handled for events limited to ring 3. 88 * REGS_USER can be handled for events limited to ring 3.
89 * 89 *
90 */ 90 */
91#define PEBS_FREERUNNING_FLAGS \ 91#define LARGE_PEBS_FLAGS \
92 (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \ 92 (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
93 PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \ 93 PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
94 PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \ 94 PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
@@ -608,7 +608,7 @@ struct x86_pmu {
608 struct event_constraint *pebs_constraints; 608 struct event_constraint *pebs_constraints;
609 void (*pebs_aliases)(struct perf_event *event); 609 void (*pebs_aliases)(struct perf_event *event);
610 int max_pebs_events; 610 int max_pebs_events;
611 unsigned long free_running_flags; 611 unsigned long large_pebs_flags;
612 612
613 /* 613 /*
614 * Intel LBR 614 * Intel LBR
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index e1259f043ae9..042b5e892ed1 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -52,11 +52,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
52#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \ 52#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
53 "lfence", X86_FEATURE_LFENCE_RDTSC) 53 "lfence", X86_FEATURE_LFENCE_RDTSC)
54 54
55#ifdef CONFIG_X86_PPRO_FENCE
56#define dma_rmb() rmb()
57#else
58#define dma_rmb() barrier() 55#define dma_rmb() barrier()
59#endif
60#define dma_wmb() barrier() 56#define dma_wmb() barrier()
61 57
62#ifdef CONFIG_X86_32 58#ifdef CONFIG_X86_32
@@ -68,30 +64,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
68#define __smp_wmb() barrier() 64#define __smp_wmb() barrier()
69#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) 65#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
70 66
71#if defined(CONFIG_X86_PPRO_FENCE)
72
73/*
74 * For this option x86 doesn't have a strong TSO memory
75 * model and we should fall back to full barriers.
76 */
77
78#define __smp_store_release(p, v) \
79do { \
80 compiletime_assert_atomic_type(*p); \
81 __smp_mb(); \
82 WRITE_ONCE(*p, v); \
83} while (0)
84
85#define __smp_load_acquire(p) \
86({ \
87 typeof(*p) ___p1 = READ_ONCE(*p); \
88 compiletime_assert_atomic_type(*p); \
89 __smp_mb(); \
90 ___p1; \
91})
92
93#else /* regular x86 TSO memory ordering */
94
95#define __smp_store_release(p, v) \ 67#define __smp_store_release(p, v) \
96do { \ 68do { \
97 compiletime_assert_atomic_type(*p); \ 69 compiletime_assert_atomic_type(*p); \
@@ -107,8 +79,6 @@ do { \
107 ___p1; \ 79 ___p1; \
108}) 80})
109 81
110#endif
111
112/* Atomic operations are already serializing on x86 */ 82/* Atomic operations are already serializing on x86 */
113#define __smp_mb__before_atomic() barrier() 83#define __smp_mb__before_atomic() barrier()
114#define __smp_mb__after_atomic() barrier() 84#define __smp_mb__after_atomic() barrier()
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 95e948627fd0..f6e5b9375d8c 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -232,21 +232,6 @@ extern void set_iounmap_nonlazy(void);
232 */ 232 */
233#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) 233#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
234 234
235/*
236 * Cache management
237 *
238 * This needed for two cases
239 * 1. Out of order aware processors
240 * 2. Accidentally out of order processors (PPro errata #51)
241 */
242
243static inline void flush_write_buffers(void)
244{
245#if defined(CONFIG_X86_PPRO_FENCE)
246 asm volatile("lock; addl $0,0(%%esp)": : :"memory");
247#endif
248}
249
250#endif /* __KERNEL__ */ 235#endif /* __KERNEL__ */
251 236
252extern void native_io_delay(void); 237extern void native_io_delay(void);
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 56d99be3706a..50bee5fe1140 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -160,7 +160,6 @@ static const __initconst struct idt_data early_pf_idts[] = {
160 */ 160 */
161static const __initconst struct idt_data dbg_idts[] = { 161static const __initconst struct idt_data dbg_idts[] = {
162 INTG(X86_TRAP_DB, debug), 162 INTG(X86_TRAP_DB, debug),
163 INTG(X86_TRAP_BP, int3),
164}; 163};
165#endif 164#endif
166 165
@@ -183,7 +182,6 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
183static const __initconst struct idt_data ist_idts[] = { 182static const __initconst struct idt_data ist_idts[] = {
184 ISTG(X86_TRAP_DB, debug, DEBUG_STACK), 183 ISTG(X86_TRAP_DB, debug, DEBUG_STACK),
185 ISTG(X86_TRAP_NMI, nmi, NMI_STACK), 184 ISTG(X86_TRAP_NMI, nmi, NMI_STACK),
186 SISTG(X86_TRAP_BP, int3, DEBUG_STACK),
187 ISTG(X86_TRAP_DF, double_fault, DOUBLEFAULT_STACK), 185 ISTG(X86_TRAP_DF, double_fault, DOUBLEFAULT_STACK),
188#ifdef CONFIG_X86_MCE 186#ifdef CONFIG_X86_MCE
189 ISTG(X86_TRAP_MC, &machine_check, MCE_STACK), 187 ISTG(X86_TRAP_MC, &machine_check, MCE_STACK),
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index bc1a27280c4b..fae86e36e399 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -546,7 +546,7 @@ static void __init kvm_guest_init(void)
546 } 546 }
547 547
548 if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && 548 if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
549 !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) 549 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
550 pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others; 550 pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
551 551
552 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) 552 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -635,7 +635,7 @@ static __init int kvm_setup_pv_tlb_flush(void)
635 int cpu; 635 int cpu;
636 636
637 if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && 637 if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
638 !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { 638 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
639 for_each_possible_cpu(cpu) { 639 for_each_possible_cpu(cpu) {
640 zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), 640 zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
641 GFP_KERNEL, cpu_to_node(cpu)); 641 GFP_KERNEL, cpu_to_node(cpu));
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 618285e475c6..ac7ea3a8242f 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -37,7 +37,6 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
37 WARN_ON(size == 0); 37 WARN_ON(size == 0);
38 if (!check_addr("map_single", dev, bus, size)) 38 if (!check_addr("map_single", dev, bus, size))
39 return NOMMU_MAPPING_ERROR; 39 return NOMMU_MAPPING_ERROR;
40 flush_write_buffers();
41 return bus; 40 return bus;
42} 41}
43 42
@@ -72,25 +71,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
72 return 0; 71 return 0;
73 s->dma_length = s->length; 72 s->dma_length = s->length;
74 } 73 }
75 flush_write_buffers();
76 return nents; 74 return nents;
77} 75}
78 76
79static void nommu_sync_single_for_device(struct device *dev,
80 dma_addr_t addr, size_t size,
81 enum dma_data_direction dir)
82{
83 flush_write_buffers();
84}
85
86
87static void nommu_sync_sg_for_device(struct device *dev,
88 struct scatterlist *sg, int nelems,
89 enum dma_data_direction dir)
90{
91 flush_write_buffers();
92}
93
94static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr) 77static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
95{ 78{
96 return dma_addr == NOMMU_MAPPING_ERROR; 79 return dma_addr == NOMMU_MAPPING_ERROR;
@@ -101,8 +84,6 @@ const struct dma_map_ops nommu_dma_ops = {
101 .free = dma_generic_free_coherent, 84 .free = dma_generic_free_coherent,
102 .map_sg = nommu_map_sg, 85 .map_sg = nommu_map_sg,
103 .map_page = nommu_map_page, 86 .map_page = nommu_map_page,
104 .sync_single_for_device = nommu_sync_single_for_device,
105 .sync_sg_for_device = nommu_sync_sg_for_device,
106 .is_phys = 1, 87 .is_phys = 1,
107 .mapping_error = nommu_mapping_error, 88 .mapping_error = nommu_mapping_error,
108 .dma_supported = x86_dma_supported, 89 .dma_supported = x86_dma_supported,
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 3d9b2308e7fa..03f3d7695dac 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -577,7 +577,6 @@ do_general_protection(struct pt_regs *regs, long error_code)
577} 577}
578NOKPROBE_SYMBOL(do_general_protection); 578NOKPROBE_SYMBOL(do_general_protection);
579 579
580/* May run on IST stack. */
581dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) 580dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
582{ 581{
583#ifdef CONFIG_DYNAMIC_FTRACE 582#ifdef CONFIG_DYNAMIC_FTRACE
@@ -592,6 +591,13 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
592 if (poke_int3_handler(regs)) 591 if (poke_int3_handler(regs))
593 return; 592 return;
594 593
594 /*
595 * Use ist_enter despite the fact that we don't use an IST stack.
596 * We can be called from a kprobe in non-CONTEXT_KERNEL kernel
597 * mode or even during context tracking state changes.
598 *
599 * This means that we can't schedule. That's okay.
600 */
595 ist_enter(regs); 601 ist_enter(regs);
596 RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); 602 RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
597#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 603#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
@@ -609,15 +615,10 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
609 SIGTRAP) == NOTIFY_STOP) 615 SIGTRAP) == NOTIFY_STOP)
610 goto exit; 616 goto exit;
611 617
612 /*
613 * Let others (NMI) know that the debug stack is in use
614 * as we may switch to the interrupt stack.
615 */
616 debug_stack_usage_inc();
617 cond_local_irq_enable(regs); 618 cond_local_irq_enable(regs);
618 do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); 619 do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
619 cond_local_irq_disable(regs); 620 cond_local_irq_disable(regs);
620 debug_stack_usage_dec(); 621
621exit: 622exit:
622 ist_exit(regs); 623 ist_exit(regs);
623} 624}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2d87603f9179..657c93409042 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10711,6 +10711,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
10711 struct vcpu_vmx *vmx = to_vmx(vcpu); 10711 struct vcpu_vmx *vmx = to_vmx(vcpu);
10712 u32 exec_control, vmcs12_exec_ctrl; 10712 u32 exec_control, vmcs12_exec_ctrl;
10713 10713
10714 if (vmx->nested.dirty_vmcs12) {
10715 prepare_vmcs02_full(vcpu, vmcs12, from_vmentry);
10716 vmx->nested.dirty_vmcs12 = false;
10717 }
10718
10714 /* 10719 /*
10715 * First, the fields that are shadowed. This must be kept in sync 10720 * First, the fields that are shadowed. This must be kept in sync
10716 * with vmx_shadow_fields.h. 10721 * with vmx_shadow_fields.h.
@@ -10948,11 +10953,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
10948 /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ 10953 /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
10949 vmx_set_efer(vcpu, vcpu->arch.efer); 10954 vmx_set_efer(vcpu, vcpu->arch.efer);
10950 10955
10951 if (vmx->nested.dirty_vmcs12) {
10952 prepare_vmcs02_full(vcpu, vmcs12, from_vmentry);
10953 vmx->nested.dirty_vmcs12 = false;
10954 }
10955
10956 /* Shadow page tables on either EPT or shadow page tables. */ 10956 /* Shadow page tables on either EPT or shadow page tables. */
10957 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), 10957 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
10958 entry_failure_code)) 10958 entry_failure_code))
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index c310a8284358..f9cfbc0d1f33 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -227,7 +227,7 @@ int __init efi_alloc_page_tables(void)
227 if (!pud) { 227 if (!pud) {
228 if (CONFIG_PGTABLE_LEVELS > 4) 228 if (CONFIG_PGTABLE_LEVELS > 4)
229 free_page((unsigned long) pgd_page_vaddr(*pgd)); 229 free_page((unsigned long) pgd_page_vaddr(*pgd));
230 free_page((unsigned long)efi_pgd); 230 free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
231 return -ENOMEM; 231 return -ENOMEM;
232 } 232 }
233 233
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index b7d73400ea29..f31e5d903161 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -30,11 +30,7 @@
30 30
31#endif /* CONFIG_X86_32 */ 31#endif /* CONFIG_X86_32 */
32 32
33#ifdef CONFIG_X86_PPRO_FENCE
34#define dma_rmb() rmb()
35#else /* CONFIG_X86_PPRO_FENCE */
36#define dma_rmb() barrier() 33#define dma_rmb() barrier()
37#endif /* CONFIG_X86_PPRO_FENCE */
38#define dma_wmb() barrier() 34#define dma_wmb() barrier()
39 35
40#include <asm-generic/barrier.h> 36#include <asm-generic/barrier.h>
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 98a3a43484c8..44abb8a0a5e5 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -3147,7 +3147,7 @@ static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3147 " Size of Tx Buffer : %u\n" 3147 " Size of Tx Buffer : %u\n"
3148 " Number of Rx Buffer: %u\n" 3148 " Number of Rx Buffer: %u\n"
3149 " Size of Rx Buffer : %u\n" 3149 " Size of Rx Buffer : %u\n"
3150 " Packets Receiverd : %u\n" 3150 " Packets Received : %u\n"
3151 " Packets Transmitted: %u\n" 3151 " Packets Transmitted: %u\n"
3152 " Cells Received : %u\n" 3152 " Cells Received : %u\n"
3153 " Cells Transmitted : %u\n" 3153 " Cells Transmitted : %u\n"
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index 4dbb30cf94ac..b922db90939a 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -118,14 +118,15 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
118 spin_lock_irqsave(&dmamux->lock, flags); 118 spin_lock_irqsave(&dmamux->lock, flags);
119 mux->chan_id = find_first_zero_bit(dmamux->dma_inuse, 119 mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
120 dmamux->dma_requests); 120 dmamux->dma_requests);
121 set_bit(mux->chan_id, dmamux->dma_inuse);
122 spin_unlock_irqrestore(&dmamux->lock, flags);
123 121
124 if (mux->chan_id == dmamux->dma_requests) { 122 if (mux->chan_id == dmamux->dma_requests) {
123 spin_unlock_irqrestore(&dmamux->lock, flags);
125 dev_err(&pdev->dev, "Run out of free DMA requests\n"); 124 dev_err(&pdev->dev, "Run out of free DMA requests\n");
126 ret = -ENOMEM; 125 ret = -ENOMEM;
127 goto error; 126 goto error_chan_id;
128 } 127 }
128 set_bit(mux->chan_id, dmamux->dma_inuse);
129 spin_unlock_irqrestore(&dmamux->lock, flags);
129 130
130 /* Look for DMA Master */ 131 /* Look for DMA Master */
131 for (i = 1, min = 0, max = dmamux->dma_reqs[i]; 132 for (i = 1, min = 0, max = dmamux->dma_reqs[i];
@@ -173,6 +174,8 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
173 174
174error: 175error:
175 clear_bit(mux->chan_id, dmamux->dma_inuse); 176 clear_bit(mux->chan_id, dmamux->dma_inuse);
177
178error_chan_id:
176 kfree(mux); 179 kfree(mux);
177 return ERR_PTR(ret); 180 return ERR_PTR(ret);
178} 181}
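
The dmamux change is an ordering fix in the allocation path: the free request line is marked in-use only after the bounds check succeeds, the lock is dropped afterwards, and the failure case unwinds through a label that skips clear_bit(). A minimal user-space model of that order, with a mutex standing in for the spinlock; the names are illustrative, not the driver's:

#include <pthread.h>
#include <stdio.h>

#define NREQ 16u

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int inuse;              /* bit i set => request line i taken */

static int alloc_chan(void)
{
        unsigned int id;

        pthread_mutex_lock(&lock);
        for (id = 0; id < NREQ && (inuse & (1u << id)); id++)
                ;
        if (id == NREQ) {               /* ran out of free DMA requests */
                pthread_mutex_unlock(&lock);
                return -1;              /* nothing to clear on this path */
        }
        inuse |= 1u << id;              /* claim only a verified id */
        pthread_mutex_unlock(&lock);
        return (int)id;
}

int main(void)
{
        printf("got channel %d\n", alloc_chan());
        return 0;
}
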
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index b21285afa4ea..1bd5f26b3f00 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -821,13 +821,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
821 pr_warn("Can't create new usermode queue because %d queues were already created\n", 821 pr_warn("Can't create new usermode queue because %d queues were already created\n",
822 dqm->total_queue_count); 822 dqm->total_queue_count);
823 retval = -EPERM; 823 retval = -EPERM;
824 goto out; 824 goto out_unlock;
825 } 825 }
826 826
827 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { 827 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
828 retval = allocate_sdma_queue(dqm, &q->sdma_id); 828 retval = allocate_sdma_queue(dqm, &q->sdma_id);
829 if (retval) 829 if (retval)
830 goto out; 830 goto out_unlock;
831 q->properties.sdma_queue_id = 831 q->properties.sdma_queue_id =
832 q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE; 832 q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
833 q->properties.sdma_engine_id = 833 q->properties.sdma_engine_id =
@@ -838,7 +838,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
838 838
839 if (!mqd) { 839 if (!mqd) {
840 retval = -ENOMEM; 840 retval = -ENOMEM;
841 goto out; 841 goto out_deallocate_sdma_queue;
842 } 842 }
843 843
844 dqm->asic_ops.init_sdma_vm(dqm, q, qpd); 844 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
@@ -848,7 +848,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
848 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, 848 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
849 &q->gart_mqd_addr, &q->properties); 849 &q->gart_mqd_addr, &q->properties);
850 if (retval) 850 if (retval)
851 goto out; 851 goto out_deallocate_sdma_queue;
852 852
853 list_add(&q->list, &qpd->queues_list); 853 list_add(&q->list, &qpd->queues_list);
854 qpd->queue_count++; 854 qpd->queue_count++;
@@ -869,7 +869,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
869 pr_debug("Total of %d queues are accountable so far\n", 869 pr_debug("Total of %d queues are accountable so far\n",
870 dqm->total_queue_count); 870 dqm->total_queue_count);
871 871
872out: 872 mutex_unlock(&dqm->lock);
873 return retval;
874
875out_deallocate_sdma_queue:
876 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
877 deallocate_sdma_queue(dqm, q->sdma_id);
878out_unlock:
873 mutex_unlock(&dqm->lock); 879 mutex_unlock(&dqm->lock);
874 return retval; 880 return retval;
875} 881}
@@ -1188,8 +1194,10 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
1188 1194
1189 /* Clear all user mode queues */ 1195 /* Clear all user mode queues */
1190 list_for_each_entry(q, &qpd->queues_list, list) { 1196 list_for_each_entry(q, &qpd->queues_list, list) {
1191 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) 1197 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1192 dqm->sdma_queue_count--; 1198 dqm->sdma_queue_count--;
1199 deallocate_sdma_queue(dqm, q->sdma_id);
1200 }
1193 1201
1194 if (q->properties.is_active) 1202 if (q->properties.is_active)
1195 dqm->queue_count--; 1203 dqm->queue_count--;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 0ecbd1f9b606..0c3bc00978f7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -188,8 +188,7 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
188 packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base; 188 packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
189 packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit; 189 packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
190 190
191 /* TODO: scratch support */ 191 packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;
192 packet->sh_hidden_private_base_vmid = 0;
193 192
194 packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); 193 packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
195 packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); 194 packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index fbffe1948b3b..90b25ce363ca 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2009,9 +2009,9 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
2009 .coupled_pm = false, 2009 .coupled_pm = false,
2010 .has_nvdisplay = false, 2010 .has_nvdisplay = false,
2011 .num_primary_formats = ARRAY_SIZE(tegra124_primary_formats), 2011 .num_primary_formats = ARRAY_SIZE(tegra124_primary_formats),
2012 .primary_formats = tegra114_primary_formats, 2012 .primary_formats = tegra124_primary_formats,
2013 .num_overlay_formats = ARRAY_SIZE(tegra124_overlay_formats), 2013 .num_overlay_formats = ARRAY_SIZE(tegra124_overlay_formats),
2014 .overlay_formats = tegra114_overlay_formats, 2014 .overlay_formats = tegra124_overlay_formats,
2015}; 2015};
2016 2016
2017static const struct tegra_dc_soc_info tegra210_dc_soc_info = { 2017static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
@@ -2160,7 +2160,7 @@ static int tegra_dc_couple(struct tegra_dc *dc)
2160 struct device_link *link; 2160 struct device_link *link;
2161 struct device *partner; 2161 struct device *partner;
2162 2162
2163 partner = driver_find_device(dc->dev->driver, NULL, 0, 2163 partner = driver_find_device(dc->dev->driver, NULL, NULL,
2164 tegra_dc_match_by_pipe); 2164 tegra_dc_match_by_pipe);
2165 if (!partner) 2165 if (!partner)
2166 return -EPROBE_DEFER; 2166 return -EPROBE_DEFER;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 50e071444a5c..8699bb969e7e 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -417,13 +417,24 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
417} 417}
418EXPORT_SYMBOL_GPL(__hv_pkt_iter_next); 418EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
419 419
420/* How many bytes were read in this iterator cycle */
421static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
422 u32 start_read_index)
423{
424 if (rbi->priv_read_index >= start_read_index)
425 return rbi->priv_read_index - start_read_index;
426 else
427 return rbi->ring_datasize - start_read_index +
428 rbi->priv_read_index;
429}
430
420/* 431/*
421 * Update host ring buffer after iterating over packets. 432 * Update host ring buffer after iterating over packets.
422 */ 433 */
423void hv_pkt_iter_close(struct vmbus_channel *channel) 434void hv_pkt_iter_close(struct vmbus_channel *channel)
424{ 435{
425 struct hv_ring_buffer_info *rbi = &channel->inbound; 436 struct hv_ring_buffer_info *rbi = &channel->inbound;
426 u32 orig_write_sz = hv_get_bytes_to_write(rbi); 437 u32 curr_write_sz, pending_sz, bytes_read, start_read_index;
427 438
428 /* 439 /*
429 * Make sure all reads are done before we update the read index since 440 * Make sure all reads are done before we update the read index since
@@ -431,8 +442,12 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
431 * is updated. 442 * is updated.
432 */ 443 */
433 virt_rmb(); 444 virt_rmb();
445 start_read_index = rbi->ring_buffer->read_index;
434 rbi->ring_buffer->read_index = rbi->priv_read_index; 446 rbi->ring_buffer->read_index = rbi->priv_read_index;
435 447
448 if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
449 return;
450
436 /* 451 /*
437 * Issue a full memory barrier before making the signaling decision. 452 * Issue a full memory barrier before making the signaling decision.
438 * Here is the reason for having this barrier: 453 * Here is the reason for having this barrier:
@@ -446,26 +461,29 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
446 */ 461 */
447 virt_mb(); 462 virt_mb();
448 463
449 /* If host has disabled notifications then skip */ 464 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
450 if (rbi->ring_buffer->interrupt_mask) 465 if (!pending_sz)
451 return; 466 return;
452 467
453 if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) { 468 /*
454 u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); 469 * Ensure the read of write_index in hv_get_bytes_to_write()
470 * happens after the read of pending_send_sz.
471 */
472 virt_rmb();
473 curr_write_sz = hv_get_bytes_to_write(rbi);
474 bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);
455 475
456 /* 476 /*
457 * If there was space before we began iteration, 477 * If there was space before we began iteration,
458 * then host was not blocked. Also handles case where 478 * then host was not blocked.
459 * pending_sz is zero then host has nothing pending 479 */
460 * and does not need to be signaled.
461 */
462 if (orig_write_sz > pending_sz)
463 return;
464 480
465 /* If pending write will not fit, don't give false hope. */ 481 if (curr_write_sz - bytes_read > pending_sz)
466 if (hv_get_bytes_to_write(rbi) < pending_sz) 482 return;
467 return; 483
468 } 484 /* If pending write will not fit, don't give false hope. */
485 if (curr_write_sz <= pending_sz)
486 return;
469 487
470 vmbus_setevent(channel); 488 vmbus_setevent(channel);
471} 489}
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index b445b3bb0bb1..f273e28c39db 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -888,6 +888,11 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
888 } 888 }
889 889
890 setup = of_device_get_match_data(&pdev->dev); 890 setup = of_device_get_match_data(&pdev->dev);
891 if (!setup) {
892 dev_err(&pdev->dev, "Can't get device data\n");
893 ret = -ENODEV;
894 goto clk_free;
895 }
891 i2c_dev->setup = *setup; 896 i2c_dev->setup = *setup;
892 897
893 ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns", 898 ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns",
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 6fe995cf16a6..3e6fd5a8ac5b 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -920,6 +920,8 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
920int st_accel_common_probe(struct iio_dev *indio_dev) 920int st_accel_common_probe(struct iio_dev *indio_dev)
921{ 921{
922 struct st_sensor_data *adata = iio_priv(indio_dev); 922 struct st_sensor_data *adata = iio_priv(indio_dev);
923 struct st_sensors_platform_data *pdata =
924 (struct st_sensors_platform_data *)adata->dev->platform_data;
923 int irq = adata->get_irq_data_ready(indio_dev); 925 int irq = adata->get_irq_data_ready(indio_dev);
924 int err; 926 int err;
925 927
@@ -946,7 +948,10 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
946 &adata->sensor_settings->fs.fs_avl[0]; 948 &adata->sensor_settings->fs.fs_avl[0];
947 adata->odr = adata->sensor_settings->odr.odr_avl[0].hz; 949 adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
948 950
949 err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data); 951 if (!pdata)
952 pdata = (struct st_sensors_platform_data *)&default_accel_pdata;
953
954 err = st_sensors_init_sensor(indio_dev, pdata);
950 if (err < 0) 955 if (err < 0)
951 goto st_accel_power_off; 956 goto st_accel_power_off;
952 957
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 29fa7736d80c..ede955d9b2a4 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -462,8 +462,10 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev)
462 regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val); 462 regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val);
463 } while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--); 463 } while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--);
464 464
465 if (timeout < 0) 465 if (timeout < 0) {
466 mutex_unlock(&indio_dev->mlock);
466 return -ETIMEDOUT; 467 return -ETIMEDOUT;
468 }
467 } 469 }
468 470
469 return 0; 471 return 0;
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index daa026d6a94f..01422d11753c 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -54,7 +54,6 @@ struct stm32_dfsdm_adc {
54 struct stm32_dfsdm *dfsdm; 54 struct stm32_dfsdm *dfsdm;
55 const struct stm32_dfsdm_dev_data *dev_data; 55 const struct stm32_dfsdm_dev_data *dev_data;
56 unsigned int fl_id; 56 unsigned int fl_id;
57 unsigned int ch_id;
58 57
59 /* ADC specific */ 58 /* ADC specific */
60 unsigned int oversamp; 59 unsigned int oversamp;
@@ -384,7 +383,7 @@ static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
384{ 383{
385 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); 384 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
386 struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; 385 struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
387 struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id]; 386 struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
388 unsigned int sample_freq = adc->sample_freq; 387 unsigned int sample_freq = adc->sample_freq;
389 unsigned int spi_freq; 388 unsigned int spi_freq;
390 int ret; 389 int ret;
@@ -419,18 +418,20 @@ static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
419 return len; 418 return len;
420} 419}
421 420
422static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc, bool dma) 421static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc,
422 const struct iio_chan_spec *chan,
423 bool dma)
423{ 424{
424 struct regmap *regmap = adc->dfsdm->regmap; 425 struct regmap *regmap = adc->dfsdm->regmap;
425 int ret; 426 int ret;
426 unsigned int dma_en = 0, cont_en = 0; 427 unsigned int dma_en = 0, cont_en = 0;
427 428
428 ret = stm32_dfsdm_start_channel(adc->dfsdm, adc->ch_id); 429 ret = stm32_dfsdm_start_channel(adc->dfsdm, chan->channel);
429 if (ret < 0) 430 if (ret < 0)
430 return ret; 431 return ret;
431 432
432 ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id, 433 ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id,
433 adc->ch_id); 434 chan->channel);
434 if (ret < 0) 435 if (ret < 0)
435 goto stop_channels; 436 goto stop_channels;
436 437
@@ -464,12 +465,13 @@ stop_channels:
464 465
465 regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id), 466 regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
466 DFSDM_CR1_RCONT_MASK, 0); 467 DFSDM_CR1_RCONT_MASK, 0);
467 stm32_dfsdm_stop_channel(adc->dfsdm, adc->fl_id); 468 stm32_dfsdm_stop_channel(adc->dfsdm, chan->channel);
468 469
469 return ret; 470 return ret;
470} 471}
471 472
472static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc) 473static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc,
474 const struct iio_chan_spec *chan)
473{ 475{
474 struct regmap *regmap = adc->dfsdm->regmap; 476 struct regmap *regmap = adc->dfsdm->regmap;
475 477
@@ -482,7 +484,7 @@ static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc)
482 regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id), 484 regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
483 DFSDM_CR1_RCONT_MASK, 0); 485 DFSDM_CR1_RCONT_MASK, 0);
484 486
485 stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id); 487 stm32_dfsdm_stop_channel(adc->dfsdm, chan->channel);
486} 488}
487 489
488static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev, 490static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev,
@@ -609,6 +611,7 @@ static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)
609static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) 611static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
610{ 612{
611 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); 613 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
614 const struct iio_chan_spec *chan = &indio_dev->channels[0];
612 int ret; 615 int ret;
613 616
614 /* Reset adc buffer index */ 617 /* Reset adc buffer index */
@@ -618,7 +621,7 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
618 if (ret < 0) 621 if (ret < 0)
619 return ret; 622 return ret;
620 623
621 ret = stm32_dfsdm_start_conv(adc, true); 624 ret = stm32_dfsdm_start_conv(adc, chan, true);
622 if (ret) { 625 if (ret) {
623 dev_err(&indio_dev->dev, "Can't start conversion\n"); 626 dev_err(&indio_dev->dev, "Can't start conversion\n");
624 goto stop_dfsdm; 627 goto stop_dfsdm;
@@ -635,7 +638,7 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
635 return 0; 638 return 0;
636 639
637err_stop_conv: 640err_stop_conv:
638 stm32_dfsdm_stop_conv(adc); 641 stm32_dfsdm_stop_conv(adc, chan);
639stop_dfsdm: 642stop_dfsdm:
640 stm32_dfsdm_stop_dfsdm(adc->dfsdm); 643 stm32_dfsdm_stop_dfsdm(adc->dfsdm);
641 644
@@ -645,11 +648,12 @@ stop_dfsdm:
645static int stm32_dfsdm_predisable(struct iio_dev *indio_dev) 648static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
646{ 649{
647 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); 650 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
651 const struct iio_chan_spec *chan = &indio_dev->channels[0];
648 652
649 if (adc->dma_chan) 653 if (adc->dma_chan)
650 dmaengine_terminate_all(adc->dma_chan); 654 dmaengine_terminate_all(adc->dma_chan);
651 655
652 stm32_dfsdm_stop_conv(adc); 656 stm32_dfsdm_stop_conv(adc, chan);
653 657
654 stm32_dfsdm_stop_dfsdm(adc->dfsdm); 658 stm32_dfsdm_stop_dfsdm(adc->dfsdm);
655 659
@@ -730,7 +734,7 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
730 if (ret < 0) 734 if (ret < 0)
731 goto stop_dfsdm; 735 goto stop_dfsdm;
732 736
733 ret = stm32_dfsdm_start_conv(adc, false); 737 ret = stm32_dfsdm_start_conv(adc, chan, false);
734 if (ret < 0) { 738 if (ret < 0) {
735 regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id), 739 regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
736 DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0)); 740 DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
@@ -751,7 +755,7 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
751 else 755 else
752 ret = IIO_VAL_INT; 756 ret = IIO_VAL_INT;
753 757
754 stm32_dfsdm_stop_conv(adc); 758 stm32_dfsdm_stop_conv(adc, chan);
755 759
756stop_dfsdm: 760stop_dfsdm:
757 stm32_dfsdm_stop_dfsdm(adc->dfsdm); 761 stm32_dfsdm_stop_dfsdm(adc->dfsdm);
@@ -765,7 +769,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
765{ 769{
766 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); 770 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
767 struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; 771 struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
768 struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id]; 772 struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
769 unsigned int spi_freq = adc->spi_freq; 773 unsigned int spi_freq = adc->spi_freq;
770 int ret = -EINVAL; 774 int ret = -EINVAL;
771 775
@@ -972,7 +976,6 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
972 } 976 }
973 ch->scan_type.realbits = 24; 977 ch->scan_type.realbits = 24;
974 ch->scan_type.storagebits = 32; 978 ch->scan_type.storagebits = 32;
975 adc->ch_id = ch->channel;
976 979
977 return stm32_dfsdm_chan_configure(adc->dfsdm, 980 return stm32_dfsdm_chan_configure(adc->dfsdm,
978 &adc->dfsdm->ch_list[ch->channel]); 981 &adc->dfsdm->ch_list[ch->channel]);
@@ -1001,7 +1004,7 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
1001 } 1004 }
1002 ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ); 1005 ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ);
1003 1006
1004 d_ch = &adc->dfsdm->ch_list[adc->ch_id]; 1007 d_ch = &adc->dfsdm->ch_list[ch->channel];
1005 if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL) 1008 if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
1006 adc->spi_freq = adc->dfsdm->spi_master_freq; 1009 adc->spi_freq = adc->dfsdm->spi_master_freq;
1007 1010
@@ -1042,8 +1045,8 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
1042 return -ENOMEM; 1045 return -ENOMEM;
1043 1046
1044 for (chan_idx = 0; chan_idx < num_ch; chan_idx++) { 1047 for (chan_idx = 0; chan_idx < num_ch; chan_idx++) {
1045 ch->scan_index = chan_idx; 1048 ch[chan_idx].scan_index = chan_idx;
1046 ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch); 1049 ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &ch[chan_idx]);
1047 if (ret < 0) { 1050 if (ret < 0) {
1048 dev_err(&indio_dev->dev, "Channels init failed\n"); 1051 dev_err(&indio_dev->dev, "Channels init failed\n");
1049 return ret; 1052 return ret;
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
index 6290332cfd3f..e50efdcc41ff 100644
--- a/drivers/iio/adc/stm32-dfsdm-core.c
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -83,7 +83,7 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
83{ 83{
84 struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm); 84 struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
85 struct device *dev = &priv->pdev->dev; 85 struct device *dev = &priv->pdev->dev;
86 unsigned int clk_div = priv->spi_clk_out_div; 86 unsigned int clk_div = priv->spi_clk_out_div, clk_src;
87 int ret; 87 int ret;
88 88
89 if (atomic_inc_return(&priv->n_active_ch) == 1) { 89 if (atomic_inc_return(&priv->n_active_ch) == 1) {
@@ -100,6 +100,14 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
100 } 100 }
101 } 101 }
102 102
103 /* select clock source, e.g. 0 for "dfsdm" or 1 for "audio" */
104 clk_src = priv->aclk ? 1 : 0;
105 ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
106 DFSDM_CHCFGR1_CKOUTSRC_MASK,
107 DFSDM_CHCFGR1_CKOUTSRC(clk_src));
108 if (ret < 0)
109 goto disable_aclk;
110
103 /* Output the SPI CLKOUT (if clk_div == 0 clock if OFF) */ 111 /* Output the SPI CLKOUT (if clk_div == 0 clock if OFF) */
104 ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0), 112 ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
105 DFSDM_CHCFGR1_CKOUTDIV_MASK, 113 DFSDM_CHCFGR1_CKOUTDIV_MASK,
@@ -274,7 +282,7 @@ static int stm32_dfsdm_probe(struct platform_device *pdev)
274 282
275 dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm", 283 dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm",
276 dfsdm->base, 284 dfsdm->base,
277 &stm32h7_dfsdm_regmap_cfg); 285 dev_data->regmap_cfg);
278 if (IS_ERR(dfsdm->regmap)) { 286 if (IS_ERR(dfsdm->regmap)) {
279 ret = PTR_ERR(dfsdm->regmap); 287 ret = PTR_ERR(dfsdm->regmap);
280 dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n", 288 dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index fbe2431f5b81..1ea9f5513b02 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -133,6 +133,9 @@ static int ccs811_start_sensor_application(struct i2c_client *client)
133 if (ret < 0) 133 if (ret < 0)
134 return ret; 134 return ret;
135 135
136 if ((ret & CCS811_STATUS_FW_MODE_APPLICATION))
137 return 0;
138
136 if ((ret & CCS811_STATUS_APP_VALID_MASK) != 139 if ((ret & CCS811_STATUS_APP_VALID_MASK) !=
137 CCS811_STATUS_APP_VALID_LOADED) 140 CCS811_STATUS_APP_VALID_LOADED)
138 return -EIO; 141 return -EIO;
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 349e5c713c03..4ddb6cf7d401 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -640,7 +640,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)
640 press_data->sensor_settings->drdy_irq.int2.addr)) 640 press_data->sensor_settings->drdy_irq.int2.addr))
641 pdata = (struct st_sensors_platform_data *)&default_press_pdata; 641 pdata = (struct st_sensors_platform_data *)&default_press_pdata;
642 642
643 err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data); 643 err = st_sensors_init_sensor(indio_dev, pdata);
644 if (err < 0) 644 if (err < 0)
645 goto st_press_power_off; 645 goto st_press_power_off;
646 646
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 9183d148d644..cb1d2ab13c66 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -207,6 +207,22 @@ int rdma_addr_size(struct sockaddr *addr)
207} 207}
208EXPORT_SYMBOL(rdma_addr_size); 208EXPORT_SYMBOL(rdma_addr_size);
209 209
210int rdma_addr_size_in6(struct sockaddr_in6 *addr)
211{
212 int ret = rdma_addr_size((struct sockaddr *) addr);
213
214 return ret <= sizeof(*addr) ? ret : 0;
215}
216EXPORT_SYMBOL(rdma_addr_size_in6);
217
218int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
219{
220 int ret = rdma_addr_size((struct sockaddr *) addr);
221
222 return ret <= sizeof(*addr) ? ret : 0;
223}
224EXPORT_SYMBOL(rdma_addr_size_kss);
225
210static struct rdma_addr_client self; 226static struct rdma_addr_client self;
211 227
212void rdma_addr_register_client(struct rdma_addr_client *client) 228void rdma_addr_register_client(struct rdma_addr_client *client)
@@ -586,6 +602,15 @@ static void process_one_req(struct work_struct *_work)
586 list_del(&req->list); 602 list_del(&req->list);
587 mutex_unlock(&lock); 603 mutex_unlock(&lock);
588 604
605 /*
606 * Although the work will normally have been canceled by the
607 * workqueue, it can still be requeued as long as it is on the
608 * req_list, so it could have been requeued before we grabbed &lock.
609 * We need to cancel it after it is removed from req_list to really be
610 * sure it is safe to free.
611 */
612 cancel_delayed_work(&req->work);
613
589 req->callback(req->status, (struct sockaddr *)&req->src_addr, 614 req->callback(req->status, (struct sockaddr *)&req->src_addr,
590 req->addr, req->context); 615 req->addr, req->context);
591 put_client(req->client); 616 put_client(req->client);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index bb065c9449be..b7459cf524e4 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -290,6 +290,7 @@ void ib_dealloc_device(struct ib_device *device)
290{ 290{
291 WARN_ON(device->reg_state != IB_DEV_UNREGISTERED && 291 WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
292 device->reg_state != IB_DEV_UNINITIALIZED); 292 device->reg_state != IB_DEV_UNINITIALIZED);
293 rdma_restrack_clean(&device->res);
293 put_device(&device->dev); 294 put_device(&device->dev);
294} 295}
295EXPORT_SYMBOL(ib_dealloc_device); 296EXPORT_SYMBOL(ib_dealloc_device);
@@ -600,8 +601,6 @@ void ib_unregister_device(struct ib_device *device)
600 } 601 }
601 up_read(&lists_rwsem); 602 up_read(&lists_rwsem);
602 603
603 rdma_restrack_clean(&device->res);
604
605 ib_device_unregister_rdmacg(device); 604 ib_device_unregister_rdmacg(device);
606 ib_device_unregister_sysfs(device); 605 ib_device_unregister_sysfs(device);
607 606
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index e5a1e7d81326..d933336d7e01 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -632,6 +632,9 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
632 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 632 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
633 return -EFAULT; 633 return -EFAULT;
634 634
635 if (!rdma_addr_size_in6(&cmd.addr))
636 return -EINVAL;
637
635 ctx = ucma_get_ctx(file, cmd.id); 638 ctx = ucma_get_ctx(file, cmd.id);
636 if (IS_ERR(ctx)) 639 if (IS_ERR(ctx))
637 return PTR_ERR(ctx); 640 return PTR_ERR(ctx);
@@ -645,22 +648,21 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
645 int in_len, int out_len) 648 int in_len, int out_len)
646{ 649{
647 struct rdma_ucm_bind cmd; 650 struct rdma_ucm_bind cmd;
648 struct sockaddr *addr;
649 struct ucma_context *ctx; 651 struct ucma_context *ctx;
650 int ret; 652 int ret;
651 653
652 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 654 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
653 return -EFAULT; 655 return -EFAULT;
654 656
655 addr = (struct sockaddr *) &cmd.addr; 657 if (cmd.reserved || !cmd.addr_size ||
656 if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr))) 658 cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
657 return -EINVAL; 659 return -EINVAL;
658 660
659 ctx = ucma_get_ctx(file, cmd.id); 661 ctx = ucma_get_ctx(file, cmd.id);
660 if (IS_ERR(ctx)) 662 if (IS_ERR(ctx))
661 return PTR_ERR(ctx); 663 return PTR_ERR(ctx);
662 664
663 ret = rdma_bind_addr(ctx->cm_id, addr); 665 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
664 ucma_put_ctx(ctx); 666 ucma_put_ctx(ctx);
665 return ret; 667 return ret;
666} 668}
@@ -670,23 +672,22 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
670 int in_len, int out_len) 672 int in_len, int out_len)
671{ 673{
672 struct rdma_ucm_resolve_ip cmd; 674 struct rdma_ucm_resolve_ip cmd;
673 struct sockaddr *src, *dst;
674 struct ucma_context *ctx; 675 struct ucma_context *ctx;
675 int ret; 676 int ret;
676 677
677 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 678 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
678 return -EFAULT; 679 return -EFAULT;
679 680
680 src = (struct sockaddr *) &cmd.src_addr; 681 if (!rdma_addr_size_in6(&cmd.src_addr) ||
681 dst = (struct sockaddr *) &cmd.dst_addr; 682 !rdma_addr_size_in6(&cmd.dst_addr))
682 if (!rdma_addr_size(src) || !rdma_addr_size(dst))
683 return -EINVAL; 683 return -EINVAL;
684 684
685 ctx = ucma_get_ctx(file, cmd.id); 685 ctx = ucma_get_ctx(file, cmd.id);
686 if (IS_ERR(ctx)) 686 if (IS_ERR(ctx))
687 return PTR_ERR(ctx); 687 return PTR_ERR(ctx);
688 688
689 ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms); 689 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
690 (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
690 ucma_put_ctx(ctx); 691 ucma_put_ctx(ctx);
691 return ret; 692 return ret;
692} 693}
@@ -696,24 +697,23 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
696 int in_len, int out_len) 697 int in_len, int out_len)
697{ 698{
698 struct rdma_ucm_resolve_addr cmd; 699 struct rdma_ucm_resolve_addr cmd;
699 struct sockaddr *src, *dst;
700 struct ucma_context *ctx; 700 struct ucma_context *ctx;
701 int ret; 701 int ret;
702 702
703 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 703 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
704 return -EFAULT; 704 return -EFAULT;
705 705
706 src = (struct sockaddr *) &cmd.src_addr; 706 if (cmd.reserved ||
707 dst = (struct sockaddr *) &cmd.dst_addr; 707 (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
708 if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) || 708 !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
709 !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
710 return -EINVAL; 709 return -EINVAL;
711 710
712 ctx = ucma_get_ctx(file, cmd.id); 711 ctx = ucma_get_ctx(file, cmd.id);
713 if (IS_ERR(ctx)) 712 if (IS_ERR(ctx))
714 return PTR_ERR(ctx); 713 return PTR_ERR(ctx);
715 714
716 ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms); 715 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
716 (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
717 ucma_put_ctx(ctx); 717 ucma_put_ctx(ctx);
718 return ret; 718 return ret;
719} 719}
@@ -1166,6 +1166,11 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
1166 if (IS_ERR(ctx)) 1166 if (IS_ERR(ctx))
1167 return PTR_ERR(ctx); 1167 return PTR_ERR(ctx);
1168 1168
1169 if (!ctx->cm_id->device) {
1170 ret = -EINVAL;
1171 goto out;
1172 }
1173
1169 resp.qp_attr_mask = 0; 1174 resp.qp_attr_mask = 0;
1170 memset(&qp_attr, 0, sizeof qp_attr); 1175 memset(&qp_attr, 0, sizeof qp_attr);
1171 qp_attr.qp_state = cmd.qp_state; 1176 qp_attr.qp_state = cmd.qp_state;
@@ -1307,7 +1312,7 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
1307 if (IS_ERR(ctx)) 1312 if (IS_ERR(ctx))
1308 return PTR_ERR(ctx); 1313 return PTR_ERR(ctx);
1309 1314
1310 if (unlikely(cmd.optval > KMALLOC_MAX_SIZE)) 1315 if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
1311 return -EINVAL; 1316 return -EINVAL;
1312 1317
1313 optval = memdup_user((void __user *) (unsigned long) cmd.optval, 1318 optval = memdup_user((void __user *) (unsigned long) cmd.optval,
@@ -1331,7 +1336,7 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
1331{ 1336{
1332 struct rdma_ucm_notify cmd; 1337 struct rdma_ucm_notify cmd;
1333 struct ucma_context *ctx; 1338 struct ucma_context *ctx;
1334 int ret; 1339 int ret = -EINVAL;
1335 1340
1336 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1341 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1337 return -EFAULT; 1342 return -EFAULT;
@@ -1340,7 +1345,9 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
1340 if (IS_ERR(ctx)) 1345 if (IS_ERR(ctx))
1341 return PTR_ERR(ctx); 1346 return PTR_ERR(ctx);
1342 1347
1343 ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event); 1348 if (ctx->cm_id->device)
1349 ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
1350
1344 ucma_put_ctx(ctx); 1351 ucma_put_ctx(ctx);
1345 return ret; 1352 return ret;
1346} 1353}
@@ -1426,7 +1433,7 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
1426 join_cmd.response = cmd.response; 1433 join_cmd.response = cmd.response;
1427 join_cmd.uid = cmd.uid; 1434 join_cmd.uid = cmd.uid;
1428 join_cmd.id = cmd.id; 1435 join_cmd.id = cmd.id;
1429 join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); 1436 join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
1430 if (!join_cmd.addr_size) 1437 if (!join_cmd.addr_size)
1431 return -EINVAL; 1438 return -EINVAL;
1432 1439
@@ -1445,7 +1452,7 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
1445 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1452 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1446 return -EFAULT; 1453 return -EFAULT;
1447 1454
1448 if (!rdma_addr_size((struct sockaddr *)&cmd.addr)) 1455 if (!rdma_addr_size_kss(&cmd.addr))
1449 return -EINVAL; 1456 return -EINVAL;
1450 1457
1451 return ucma_process_join(file, &cmd, out_len); 1458 return ucma_process_join(file, &cmd, out_len);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index db2ff352d75f..ec638778661c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -4383,7 +4383,7 @@ err_dma_alloc_buf:
4383 eq->l0_dma = 0; 4383 eq->l0_dma = 0;
4384 4384
4385 if (mhop_num == 1) 4385 if (mhop_num == 1)
4386 for (i -= i; i >= 0; i--) 4386 for (i -= 1; i >= 0; i--)
4387 dma_free_coherent(dev, buf_chk_sz, eq->buf[i], 4387 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
4388 eq->buf_dma[i]); 4388 eq->buf_dma[i]);
4389 else if (mhop_num == 2) { 4389 else if (mhop_num == 2) {
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 390e4375647e..071fd9a7b919 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3482,9 +3482,12 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev)
3482 if (err) 3482 if (err)
3483 mlx5_ib_warn(dev, "mr cache cleanup failed\n"); 3483 mlx5_ib_warn(dev, "mr cache cleanup failed\n");
3484 3484
3485 mlx5_ib_destroy_qp(dev->umrc.qp); 3485 if (dev->umrc.qp)
3486 ib_free_cq(dev->umrc.cq); 3486 mlx5_ib_destroy_qp(dev->umrc.qp);
3487 ib_dealloc_pd(dev->umrc.pd); 3487 if (dev->umrc.cq)
3488 ib_free_cq(dev->umrc.cq);
3489 if (dev->umrc.pd)
3490 ib_dealloc_pd(dev->umrc.pd);
3488} 3491}
3489 3492
3490enum { 3493enum {
@@ -3586,12 +3589,15 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
3586 3589
3587error_4: 3590error_4:
3588 mlx5_ib_destroy_qp(qp); 3591 mlx5_ib_destroy_qp(qp);
3592 dev->umrc.qp = NULL;
3589 3593
3590error_3: 3594error_3:
3591 ib_free_cq(cq); 3595 ib_free_cq(cq);
3596 dev->umrc.cq = NULL;
3592 3597
3593error_2: 3598error_2:
3594 ib_dealloc_pd(pd); 3599 ib_dealloc_pd(pd);
3600 dev->umrc.pd = NULL;
3595 3601
3596error_0: 3602error_0:
3597 kfree(attr); 3603 kfree(attr);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 95a36e9ea552..654bc31bc428 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -740,6 +740,9 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
740{ 740{
741 int i; 741 int i;
742 742
743 if (!dev->cache.wq)
744 return 0;
745
743 dev->cache.stopped = 1; 746 dev->cache.stopped = 1;
744 flush_workqueue(dev->cache.wq); 747 flush_workqueue(dev->cache.wq);
745 748
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index eb32abb0099a..f9a645c869ce 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -833,7 +833,8 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
833 833
834 dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev); 834 dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
835 if (!dev->num_cnq) { 835 if (!dev->num_cnq) {
836 DP_ERR(dev, "not enough CNQ resources.\n"); 836 DP_ERR(dev, "Failed. At least one CNQ is required.\n");
837 rc = -ENOMEM;
837 goto init_err; 838 goto init_err;
838 } 839 }
839 840
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 7d51ef47667f..f9c3cc71f5c0 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1841,14 +1841,15 @@ static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1841 1841
1842static int qedr_update_qp_state(struct qedr_dev *dev, 1842static int qedr_update_qp_state(struct qedr_dev *dev,
1843 struct qedr_qp *qp, 1843 struct qedr_qp *qp,
1844 enum qed_roce_qp_state cur_state,
1844 enum qed_roce_qp_state new_state) 1845 enum qed_roce_qp_state new_state)
1845{ 1846{
1846 int status = 0; 1847 int status = 0;
1847 1848
1848 if (new_state == qp->state) 1849 if (new_state == cur_state)
1849 return 0; 1850 return 0;
1850 1851
1851 switch (qp->state) { 1852 switch (cur_state) {
1852 case QED_ROCE_QP_STATE_RESET: 1853 case QED_ROCE_QP_STATE_RESET:
1853 switch (new_state) { 1854 switch (new_state) {
1854 case QED_ROCE_QP_STATE_INIT: 1855 case QED_ROCE_QP_STATE_INIT:
@@ -1955,6 +1956,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1955 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev); 1956 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
1956 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); 1957 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1957 enum ib_qp_state old_qp_state, new_qp_state; 1958 enum ib_qp_state old_qp_state, new_qp_state;
1959 enum qed_roce_qp_state cur_state;
1958 int rc = 0; 1960 int rc = 0;
1959 1961
1960 DP_DEBUG(dev, QEDR_MSG_QP, 1962 DP_DEBUG(dev, QEDR_MSG_QP,
@@ -2086,18 +2088,23 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2086 SET_FIELD(qp_params.modify_flags, 2088 SET_FIELD(qp_params.modify_flags,
2087 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1); 2089 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2088 2090
2089 qp_params.ack_timeout = attr->timeout; 2091 /* The received timeout value is an exponent used like this:
2090 if (attr->timeout) { 2092 * "12.7.34 LOCAL ACK TIMEOUT
2091 u32 temp; 2093 * Value representing the transport (ACK) timeout for use by
2092 2094 * the remote, expressed as: 4.096 * 2^timeout [usec]"
2093 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000; 2095 * The FW expects timeout in msec so we need to divide the usec
2094 /* FW requires [msec] */ 2096 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2095 qp_params.ack_timeout = temp; 2097 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2096 } else { 2098 * The value of zero means infinite so we use a 'max_t' to make
2097 /* Infinite */ 2099 * sure that sub 1 msec values will be configured as 1 msec.
2100 */
2101 if (attr->timeout)
2102 qp_params.ack_timeout =
2103 1 << max_t(int, attr->timeout - 8, 0);
2104 else
2098 qp_params.ack_timeout = 0; 2105 qp_params.ack_timeout = 0;
2099 }
2100 } 2106 }
2107
2101 if (attr_mask & IB_QP_RETRY_CNT) { 2108 if (attr_mask & IB_QP_RETRY_CNT) {
2102 SET_FIELD(qp_params.modify_flags, 2109 SET_FIELD(qp_params.modify_flags,
2103 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1); 2110 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
@@ -2170,13 +2177,25 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2170 qp->dest_qp_num = attr->dest_qp_num; 2177 qp->dest_qp_num = attr->dest_qp_num;
2171 } 2178 }
2172 2179
2180 cur_state = qp->state;
2181
2182 /* Update the QP state before the actual ramrod to prevent a race with
2183 * fast path. Modifying the QP state to error will cause the device to
2184 * flush the CQEs and while polling the flushed CQEs will considered as
2185 * a potential issue if the QP isn't in error state.
2186 */
2187 if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2188 !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2189 qp->state = QED_ROCE_QP_STATE_ERR;
2190
2173 if (qp->qp_type != IB_QPT_GSI) 2191 if (qp->qp_type != IB_QPT_GSI)
2174 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx, 2192 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2175 qp->qed_qp, &qp_params); 2193 qp->qed_qp, &qp_params);
2176 2194
2177 if (attr_mask & IB_QP_STATE) { 2195 if (attr_mask & IB_QP_STATE) {
2178 if ((qp->qp_type != IB_QPT_GSI) && (!udata)) 2196 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2179 rc = qedr_update_qp_state(dev, qp, qp_params.new_state); 2197 rc = qedr_update_qp_state(dev, qp, cur_state,
2198 qp_params.new_state);
2180 qp->state = qp_params.new_state; 2199 qp->state = qp_params.new_state;
2181 } 2200 }
2182 2201
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index a05a560d3cba..a6b7baf31cdd 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -887,7 +887,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
887 887
888 q = bdev_get_queue(p->path.dev->bdev); 888 q = bdev_get_queue(p->path.dev->bdev);
889 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL); 889 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
890 if (attached_handler_name) { 890 if (attached_handler_name || m->hw_handler_name) {
891 INIT_DELAYED_WORK(&p->activate_path, activate_path_work); 891 INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
892 r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error); 892 r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
893 if (r) { 893 if (r) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 45328d8b2859..353ea0ede091 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -466,7 +466,7 @@ static int dm_get_bdev_for_ioctl(struct mapped_device *md,
466{ 466{
467 struct dm_target *tgt; 467 struct dm_target *tgt;
468 struct dm_table *map; 468 struct dm_table *map;
469 int srcu_idx, r; 469 int srcu_idx, r, r2;
470 470
471retry: 471retry:
472 r = -ENOTTY; 472 r = -ENOTTY;
@@ -492,9 +492,11 @@ retry:
492 goto out; 492 goto out;
493 493
494 bdgrab(*bdev); 494 bdgrab(*bdev);
495 r = blkdev_get(*bdev, *mode, _dm_claim_ptr); 495 r2 = blkdev_get(*bdev, *mode, _dm_claim_ptr);
496 if (r < 0) 496 if (r2 < 0) {
497 r = r2;
497 goto out; 498 goto out;
499 }
498 500
499 dm_put_live_table(md, srcu_idx); 501 dm_put_live_table(md, srcu_idx);
500 return r; 502 return r;
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 372c074bb1b9..86c1a190d946 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -151,7 +151,7 @@ config DVB_MMAP
151 select VIDEOBUF2_VMALLOC 151 select VIDEOBUF2_VMALLOC
152 default n 152 default n
153 help 153 help
154 This option enables DVB experimental memory-mapped API, with 154 This option enables DVB experimental memory-mapped API, which
155 reduces the number of context switches to read DVB buffers, as 155 reduces the number of context switches to read DVB buffers, as
156 the buffers can use mmap() syscalls. 156 the buffers can use mmap() syscalls.
157 157
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
index 92f93a880015..aba488cd0e64 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -172,16 +172,13 @@ static irqreturn_t tegra_cec_irq_handler(int irq, void *data)
172 } 172 }
173 } 173 }
174 174
175 if (status & (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN | 175 if (status & TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED) {
176 TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED |
177 TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED |
178 TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED)) {
179 cec_write(cec, TEGRA_CEC_INT_STAT, 176 cec_write(cec, TEGRA_CEC_INT_STAT,
180 (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN | 177 TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED);
181 TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED | 178 cec->rx_done = false;
182 TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED | 179 cec->rx_buf_cnt = 0;
183 TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED)); 180 }
184 } else if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) { 181 if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) {
185 u32 v; 182 u32 v;
186 183
187 cec_write(cec, TEGRA_CEC_INT_STAT, 184 cec_write(cec, TEGRA_CEC_INT_STAT,
@@ -255,7 +252,7 @@ static int tegra_cec_adap_enable(struct cec_adapter *adap, bool enable)
255 TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED | 252 TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED |
256 TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED | 253 TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED |
257 TEGRA_CEC_INT_MASK_RX_REGISTER_FULL | 254 TEGRA_CEC_INT_MASK_RX_REGISTER_FULL |
258 TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN); 255 TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED);
259 256
260 cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE); 257 cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE);
261 return 0; 258 return 0;
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 7c0b27d132b1..b479bd81120b 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1889,6 +1889,8 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
1889 do { 1889 do {
1890 uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi); 1890 uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
1891 mask = (1 << (cfi->device_type * 8)) - 1; 1891 mask = (1 << (cfi->device_type * 8)) - 1;
1892 if (ofs >= map->size)
1893 return 0;
1892 result = map_read(map, base + ofs); 1894 result = map_read(map, base + ofs);
1893 bank++; 1895 bank++;
1894 } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION); 1896 } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index de8c902059b8..7d80a8bb96fe 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -479,7 +479,7 @@ static int shrink_ecclayout(struct mtd_info *mtd,
479 for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) { 479 for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
480 u32 eccpos; 480 u32 eccpos;
481 481
482 ret = mtd_ooblayout_ecc(mtd, section, &oobregion); 482 ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
483 if (ret < 0) { 483 if (ret < 0) {
484 if (ret != -ERANGE) 484 if (ret != -ERANGE)
485 return ret; 485 return ret;
@@ -526,7 +526,7 @@ static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
526 for (i = 0; i < ARRAY_SIZE(to->eccpos);) { 526 for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
527 u32 eccpos; 527 u32 eccpos;
528 528
529 ret = mtd_ooblayout_ecc(mtd, section, &oobregion); 529 ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
530 if (ret < 0) { 530 if (ret < 0) {
531 if (ret != -ERANGE) 531 if (ret != -ERANGE)
532 return ret; 532 return ret;
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
index fcbe4fd6e684..ca0a70389ba9 100644
--- a/drivers/mtd/nand/atmel/pmecc.c
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -426,7 +426,7 @@ static int get_strength(struct atmel_pmecc_user *user)
426 426
427static int get_sectorsize(struct atmel_pmecc_user *user) 427static int get_sectorsize(struct atmel_pmecc_user *user)
428{ 428{
429 return user->cache.cfg & PMECC_LOOKUP_TABLE_SIZE_1024 ? 1024 : 512; 429 return user->cache.cfg & PMECC_CFG_SECTOR1024 ? 1024 : 512;
430} 430}
431 431
432static void atmel_pmecc_gen_syndrome(struct atmel_pmecc_user *user, int sector) 432static void atmel_pmecc_gen_syndrome(struct atmel_pmecc_user *user, int sector)
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 4872a7ba6503..5a9c2f0020c2 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -173,14 +173,9 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
173 173
174/* returns nonzero if entire page is blank */ 174/* returns nonzero if entire page is blank */
175static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl, 175static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
176 u32 *eccstat, unsigned int bufnum) 176 u32 eccstat, unsigned int bufnum)
177{ 177{
178 u32 reg = eccstat[bufnum / 4]; 178 return (eccstat >> ((3 - bufnum % 4) * 8)) & 15;
179 int errors;
180
181 errors = (reg >> ((3 - bufnum % 4) * 8)) & 15;
182
183 return errors;
184} 179}
185 180
186/* 181/*
@@ -193,7 +188,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
193 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 188 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
194 struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; 189 struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
195 struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; 190 struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
196 u32 eccstat[4]; 191 u32 eccstat;
197 int i; 192 int i;
198 193
199 /* set the chip select for NAND Transaction */ 194 /* set the chip select for NAND Transaction */
@@ -228,19 +223,17 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
228 if (nctrl->eccread) { 223 if (nctrl->eccread) {
229 int errors; 224 int errors;
230 int bufnum = nctrl->page & priv->bufnum_mask; 225 int bufnum = nctrl->page & priv->bufnum_mask;
231 int sector = bufnum * chip->ecc.steps; 226 int sector_start = bufnum * chip->ecc.steps;
232 int sector_end = sector + chip->ecc.steps - 1; 227 int sector_end = sector_start + chip->ecc.steps - 1;
233 __be32 *eccstat_regs; 228 __be32 *eccstat_regs;
234 229
235 if (ctrl->version >= FSL_IFC_VERSION_2_0_0) 230 eccstat_regs = ifc->ifc_nand.nand_eccstat;
236 eccstat_regs = ifc->ifc_nand.v2_nand_eccstat; 231 eccstat = ifc_in32(&eccstat_regs[sector_start / 4]);
237 else
238 eccstat_regs = ifc->ifc_nand.v1_nand_eccstat;
239 232
240 for (i = sector / 4; i <= sector_end / 4; i++) 233 for (i = sector_start; i <= sector_end; i++) {
241 eccstat[i] = ifc_in32(&eccstat_regs[i]); 234 if (i != sector_start && !(i % 4))
235 eccstat = ifc_in32(&eccstat_regs[i / 4]);
242 236
243 for (i = sector; i <= sector_end; i++) {
244 errors = check_read_ecc(mtd, ctrl, eccstat, i); 237 errors = check_read_ecc(mtd, ctrl, eccstat, i);
245 238
246 if (errors == 15) { 239 if (errors == 15) {
@@ -626,6 +619,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
626 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 619 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
627 struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; 620 struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
628 u32 nand_fsr; 621 u32 nand_fsr;
622 int status;
629 623
630 /* Use READ_STATUS command, but wait for the device to be ready */ 624 /* Use READ_STATUS command, but wait for the device to be ready */
631 ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 625 ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
@@ -640,12 +634,12 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
640 fsl_ifc_run_command(mtd); 634 fsl_ifc_run_command(mtd);
641 635
642 nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); 636 nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
643 637 status = nand_fsr >> 24;
644 /* 638 /*
645 * The chip always seems to report that it is 639 * The chip always seems to report that it is
646 * write-protected, even when it is not. 640 * write-protected, even when it is not.
647 */ 641 */
648 return nand_fsr | NAND_STATUS_WP; 642 return status | NAND_STATUS_WP;
649} 643}
650 644
651/* 645/*
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c669554d70bb..b7b113018853 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1528,39 +1528,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1528 goto err_close; 1528 goto err_close;
1529 } 1529 }
1530 1530
1531 /* If the mode uses primary, then the following is handled by
1532 * bond_change_active_slave().
1533 */
1534 if (!bond_uses_primary(bond)) {
1535 /* set promiscuity level to new slave */
1536 if (bond_dev->flags & IFF_PROMISC) {
1537 res = dev_set_promiscuity(slave_dev, 1);
1538 if (res)
1539 goto err_close;
1540 }
1541
1542 /* set allmulti level to new slave */
1543 if (bond_dev->flags & IFF_ALLMULTI) {
1544 res = dev_set_allmulti(slave_dev, 1);
1545 if (res)
1546 goto err_close;
1547 }
1548
1549 netif_addr_lock_bh(bond_dev);
1550
1551 dev_mc_sync_multiple(slave_dev, bond_dev);
1552 dev_uc_sync_multiple(slave_dev, bond_dev);
1553
1554 netif_addr_unlock_bh(bond_dev);
1555 }
1556
1557 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1558 /* add lacpdu mc addr to mc list */
1559 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
1560
1561 dev_mc_add(slave_dev, lacpdu_multicast);
1562 }
1563
1564 res = vlan_vids_add_by_dev(slave_dev, bond_dev); 1531 res = vlan_vids_add_by_dev(slave_dev, bond_dev);
1565 if (res) { 1532 if (res) {
1566 netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n", 1533 netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
@@ -1725,6 +1692,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1725 goto err_upper_unlink; 1692 goto err_upper_unlink;
1726 } 1693 }
1727 1694
1695 /* If the mode uses primary, then the following is handled by
1696 * bond_change_active_slave().
1697 */
1698 if (!bond_uses_primary(bond)) {
1699 /* set promiscuity level to new slave */
1700 if (bond_dev->flags & IFF_PROMISC) {
1701 res = dev_set_promiscuity(slave_dev, 1);
1702 if (res)
1703 goto err_sysfs_del;
1704 }
1705
1706 /* set allmulti level to new slave */
1707 if (bond_dev->flags & IFF_ALLMULTI) {
1708 res = dev_set_allmulti(slave_dev, 1);
1709 if (res) {
1710 if (bond_dev->flags & IFF_PROMISC)
1711 dev_set_promiscuity(slave_dev, -1);
1712 goto err_sysfs_del;
1713 }
1714 }
1715
1716 netif_addr_lock_bh(bond_dev);
1717 dev_mc_sync_multiple(slave_dev, bond_dev);
1718 dev_uc_sync_multiple(slave_dev, bond_dev);
1719 netif_addr_unlock_bh(bond_dev);
1720
1721 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1722 /* add lacpdu mc addr to mc list */
1723 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
1724
1725 dev_mc_add(slave_dev, lacpdu_multicast);
1726 }
1727 }
1728
1728 bond->slave_cnt++; 1729 bond->slave_cnt++;
1729 bond_compute_features(bond); 1730 bond_compute_features(bond);
1730 bond_set_carrier(bond); 1731 bond_set_carrier(bond);
@@ -1748,6 +1749,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1748 return 0; 1749 return 0;
1749 1750
1750/* Undo stages on error */ 1751/* Undo stages on error */
1752err_sysfs_del:
1753 bond_sysfs_slave_del(new_slave);
1754
1751err_upper_unlink: 1755err_upper_unlink:
1752 bond_upper_dev_unlink(bond, new_slave); 1756 bond_upper_dev_unlink(bond, new_slave);
1753 1757
@@ -1755,9 +1759,6 @@ err_unregister:
1755 netdev_rx_handler_unregister(slave_dev); 1759 netdev_rx_handler_unregister(slave_dev);
1756 1760
1757err_detach: 1761err_detach:
1758 if (!bond_uses_primary(bond))
1759 bond_hw_addr_flush(bond_dev, slave_dev);
1760
1761 vlan_vids_del_by_dev(slave_dev, bond_dev); 1762 vlan_vids_del_by_dev(slave_dev, bond_dev);
1762 if (rcu_access_pointer(bond->primary_slave) == new_slave) 1763 if (rcu_access_pointer(bond->primary_slave) == new_slave)
1763 RCU_INIT_POINTER(bond->primary_slave, NULL); 1764 RCU_INIT_POINTER(bond->primary_slave, NULL);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 511ca134f13f..d244c41898dd 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1409,6 +1409,7 @@ static const struct of_device_id mt7530_of_match[] = {
1409 { .compatible = "mediatek,mt7530" }, 1409 { .compatible = "mediatek,mt7530" },
1410 { /* sentinel */ }, 1410 { /* sentinel */ },
1411}; 1411};
1412MODULE_DEVICE_TABLE(of, mt7530_of_match);
1412 1413
1413static struct mdio_driver mt7530_mdio_driver = { 1414static struct mdio_driver mt7530_mdio_driver = {
1414 .probe = mt7530_probe, 1415 .probe = mt7530_probe,
@@ -1424,4 +1425,3 @@ mdio_module_driver(mt7530_mdio_driver);
1424MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); 1425MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
1425MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch"); 1426MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch");
1426MODULE_LICENSE("GPL"); 1427MODULE_LICENSE("GPL");
1427MODULE_ALIAS("platform:mediatek-mt7530");
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a58acdb5eba3..b26bcdf4cd03 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1132,6 +1132,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
1132 } 1132 }
1133 mvreg_write(pp, MVNETA_TXQ_CMD, q_map); 1133 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
1134 1134
1135 q_map = 0;
1135 /* Enable all initialized RXQs. */ 1136 /* Enable all initialized RXQs. */
1136 for (queue = 0; queue < rxq_number; queue++) { 1137 for (queue = 0; queue < rxq_number; queue++) {
1137 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 1138 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 1a0c3bf86ead..752a72499b4f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -156,57 +156,63 @@ static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
156static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev) 156static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
157{ 157{
158 struct mlx4_en_priv *priv = netdev_priv(netdev); 158 struct mlx4_en_priv *priv = netdev_priv(netdev);
159 struct mlx4_en_port_profile *prof = priv->prof;
159 struct mlx4_en_dev *mdev = priv->mdev; 160 struct mlx4_en_dev *mdev = priv->mdev;
161 u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
160 162
161 if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 163 if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
162 return 1; 164 return 1;
163 165
164 if (priv->cee_config.pfc_state) { 166 if (priv->cee_config.pfc_state) {
165 int tc; 167 int tc;
168 rx_ppp = prof->rx_ppp;
169 tx_ppp = prof->tx_ppp;
166 170
167 priv->prof->rx_pause = 0;
168 priv->prof->tx_pause = 0;
169 for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) { 171 for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
170 u8 tc_mask = 1 << tc; 172 u8 tc_mask = 1 << tc;
171 173
172 switch (priv->cee_config.dcb_pfc[tc]) { 174 switch (priv->cee_config.dcb_pfc[tc]) {
173 case pfc_disabled: 175 case pfc_disabled:
174 priv->prof->tx_ppp &= ~tc_mask; 176 tx_ppp &= ~tc_mask;
175 priv->prof->rx_ppp &= ~tc_mask; 177 rx_ppp &= ~tc_mask;
176 break; 178 break;
177 case pfc_enabled_full: 179 case pfc_enabled_full:
178 priv->prof->tx_ppp |= tc_mask; 180 tx_ppp |= tc_mask;
179 priv->prof->rx_ppp |= tc_mask; 181 rx_ppp |= tc_mask;
180 break; 182 break;
181 case pfc_enabled_tx: 183 case pfc_enabled_tx:
182 priv->prof->tx_ppp |= tc_mask; 184 tx_ppp |= tc_mask;
183 priv->prof->rx_ppp &= ~tc_mask; 185 rx_ppp &= ~tc_mask;
184 break; 186 break;
185 case pfc_enabled_rx: 187 case pfc_enabled_rx:
186 priv->prof->tx_ppp &= ~tc_mask; 188 tx_ppp &= ~tc_mask;
187 priv->prof->rx_ppp |= tc_mask; 189 rx_ppp |= tc_mask;
188 break; 190 break;
189 default: 191 default:
190 break; 192 break;
191 } 193 }
192 } 194 }
193 en_dbg(DRV, priv, "Set pfc on\n"); 195 rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause;
196 tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause;
194 } else { 197 } else {
195 priv->prof->rx_pause = 1; 198 rx_ppp = 0;
196 priv->prof->tx_pause = 1; 199 tx_ppp = 0;
197 en_dbg(DRV, priv, "Set pfc off\n"); 200 rx_pause = prof->rx_pause;
201 tx_pause = prof->tx_pause;
198 } 202 }
199 203
200 if (mlx4_SET_PORT_general(mdev->dev, priv->port, 204 if (mlx4_SET_PORT_general(mdev->dev, priv->port,
201 priv->rx_skb_size + ETH_FCS_LEN, 205 priv->rx_skb_size + ETH_FCS_LEN,
202 priv->prof->tx_pause, 206 tx_pause, tx_ppp, rx_pause, rx_ppp)) {
203 priv->prof->tx_ppp,
204 priv->prof->rx_pause,
205 priv->prof->rx_ppp)) {
206 en_err(priv, "Failed setting pause params\n"); 207 en_err(priv, "Failed setting pause params\n");
207 return 1; 208 return 1;
208 } 209 }
209 210
211 prof->tx_ppp = tx_ppp;
212 prof->rx_ppp = rx_ppp;
213 prof->tx_pause = tx_pause;
214 prof->rx_pause = rx_pause;
215
210 return 0; 216 return 0;
211} 217}
212 218
@@ -408,6 +414,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
408 struct mlx4_en_priv *priv = netdev_priv(dev); 414 struct mlx4_en_priv *priv = netdev_priv(dev);
409 struct mlx4_en_port_profile *prof = priv->prof; 415 struct mlx4_en_port_profile *prof = priv->prof;
410 struct mlx4_en_dev *mdev = priv->mdev; 416 struct mlx4_en_dev *mdev = priv->mdev;
417 u32 tx_pause, tx_ppp, rx_pause, rx_ppp;
411 int err; 418 int err;
412 419
413 en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n", 420 en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
@@ -416,23 +423,26 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
416 pfc->mbc, 423 pfc->mbc,
417 pfc->delay); 424 pfc->delay);
418 425
419 prof->rx_pause = !pfc->pfc_en; 426 rx_pause = prof->rx_pause && !pfc->pfc_en;
420 prof->tx_pause = !pfc->pfc_en; 427 tx_pause = prof->tx_pause && !pfc->pfc_en;
421 prof->rx_ppp = pfc->pfc_en; 428 rx_ppp = pfc->pfc_en;
422 prof->tx_ppp = pfc->pfc_en; 429 tx_ppp = pfc->pfc_en;
423 430
424 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 431 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
425 priv->rx_skb_size + ETH_FCS_LEN, 432 priv->rx_skb_size + ETH_FCS_LEN,
426 prof->tx_pause, 433 tx_pause, tx_ppp, rx_pause, rx_ppp);
427 prof->tx_ppp, 434 if (err) {
428 prof->rx_pause,
429 prof->rx_ppp);
430 if (err)
431 en_err(priv, "Failed setting pause params\n"); 435 en_err(priv, "Failed setting pause params\n");
432 else 436 return err;
433 mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, 437 }
434 prof->rx_ppp, prof->rx_pause, 438
435 prof->tx_ppp, prof->tx_pause); 439 mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
440 rx_ppp, rx_pause, tx_ppp, tx_pause);
441
442 prof->tx_ppp = tx_ppp;
443 prof->rx_ppp = rx_ppp;
444 prof->rx_pause = rx_pause;
445 prof->tx_pause = tx_pause;
436 446
437 return err; 447 return err;
438} 448}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 9a7a2f05ab35..a30a2e95d13f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1060,27 +1060,32 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
1060{ 1060{
1061 struct mlx4_en_priv *priv = netdev_priv(dev); 1061 struct mlx4_en_priv *priv = netdev_priv(dev);
1062 struct mlx4_en_dev *mdev = priv->mdev; 1062 struct mlx4_en_dev *mdev = priv->mdev;
1063 u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
1063 int err; 1064 int err;
1064 1065
1065 if (pause->autoneg) 1066 if (pause->autoneg)
1066 return -EINVAL; 1067 return -EINVAL;
1067 1068
1068 priv->prof->tx_pause = pause->tx_pause != 0; 1069 tx_pause = !!(pause->tx_pause);
1069 priv->prof->rx_pause = pause->rx_pause != 0; 1070 rx_pause = !!(pause->rx_pause);
1071 rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
1072 tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
1073
1070 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 1074 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1071 priv->rx_skb_size + ETH_FCS_LEN, 1075 priv->rx_skb_size + ETH_FCS_LEN,
1072 priv->prof->tx_pause, 1076 tx_pause, tx_ppp, rx_pause, rx_ppp);
1073 priv->prof->tx_ppp, 1077 if (err) {
1074 priv->prof->rx_pause, 1078 en_err(priv, "Failed setting pause params, err = %d\n", err);
1075 priv->prof->rx_ppp); 1079 return err;
1076 if (err) 1080 }
1077 en_err(priv, "Failed setting pause params\n"); 1081
1078 else 1082 mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
1079 mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, 1083 rx_ppp, rx_pause, tx_ppp, tx_pause);
1080 priv->prof->rx_ppp, 1084
1081 priv->prof->rx_pause, 1085 priv->prof->tx_pause = tx_pause;
1082 priv->prof->tx_ppp, 1086 priv->prof->rx_pause = rx_pause;
1083 priv->prof->tx_pause); 1087 priv->prof->tx_ppp = tx_ppp;
1088 priv->prof->rx_ppp = rx_ppp;
1084 1089
1085 return err; 1090 return err;
1086} 1091}
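
Both mlx4 hunks above implement the same policy: global pause and per-priority pause (PFC) are treated as mutually exclusive, the requested values are pushed to the firmware first, and the cached port profile is only updated once mlx4_SET_PORT_general() has succeeded. The following standalone sketch models that decision logic; the struct and helper names are invented stand-ins for the driver's mlx4_en_port_profile and firmware call.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's cached port profile. */
struct port_profile {
	bool tx_pause, rx_pause;	/* global link-level flow control */
	bool tx_ppp, rx_ppp;		/* per-priority pause (PFC) */
};

/* Stub for the firmware call; returns 0 on success like mlx4_SET_PORT_general(). */
static int hw_set_flow_control(bool tx_pause, bool tx_ppp, bool rx_pause, bool rx_ppp)
{
	printf("hw: pause tx=%d rx=%d, ppp tx=%d rx=%d\n",
	       tx_pause, rx_pause, tx_ppp, rx_ppp);
	return 0;
}

/* Enabling any PFC priority turns global pause off, and the cached profile is
 * only updated after the hardware accepted the new configuration.
 */
static int set_pfc(struct port_profile *prof, unsigned char pfc_en)
{
	bool rx_pause = prof->rx_pause && !pfc_en;
	bool tx_pause = prof->tx_pause && !pfc_en;
	bool rx_ppp = pfc_en;
	bool tx_ppp = pfc_en;
	int err = hw_set_flow_control(tx_pause, tx_ppp, rx_pause, rx_ppp);

	if (err)
		return err;	/* profile left untouched on failure */

	prof->tx_pause = tx_pause;
	prof->rx_pause = rx_pause;
	prof->tx_ppp = tx_ppp;
	prof->rx_ppp = rx_ppp;
	return 0;
}

int main(void)
{
	struct port_profile prof = { .tx_pause = true, .rx_pause = true };

	set_pfc(&prof, 0x08);	/* enable PFC on priority 3: global pause goes off */
	set_pfc(&prof, 0x00);	/* disable PFC: pause stays off until re-requested */
	return 0;
}
```

The ethtool path above applies the same precedence from the other direction: requesting global pause clears the cached PFC bits before programming the port.
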
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 2c2965497ed3..d25e16d2c319 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -163,9 +163,9 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
163 params->udp_rss = 0; 163 params->udp_rss = 0;
164 } 164 }
165 for (i = 1; i <= MLX4_MAX_PORTS; i++) { 165 for (i = 1; i <= MLX4_MAX_PORTS; i++) {
166 params->prof[i].rx_pause = 1; 166 params->prof[i].rx_pause = !(pfcrx || pfctx);
167 params->prof[i].rx_ppp = pfcrx; 167 params->prof[i].rx_ppp = pfcrx;
168 params->prof[i].tx_pause = 1; 168 params->prof[i].tx_pause = !(pfcrx || pfctx);
169 params->prof[i].tx_ppp = pfctx; 169 params->prof[i].tx_ppp = pfctx;
170 params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; 170 params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
171 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; 171 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 606a0e0beeae..29e50f787349 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -5088,6 +5088,7 @@ static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5088 &tracker->res_tree[RES_FS_RULE]); 5088 &tracker->res_tree[RES_FS_RULE]);
5089 list_del(&fs_rule->com.list); 5089 list_del(&fs_rule->com.list);
5090 spin_unlock_irq(mlx4_tlock(dev)); 5090 spin_unlock_irq(mlx4_tlock(dev));
5091 kfree(fs_rule->mirr_mbox);
5091 kfree(fs_rule); 5092 kfree(fs_rule);
5092 state = 0; 5093 state = 0;
5093 break; 5094 break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 25deaa5a534c..c032319f1cb9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -46,7 +46,7 @@ config MLX5_MPFS
46 46
47config MLX5_ESWITCH 47config MLX5_ESWITCH
48 bool "Mellanox Technologies MLX5 SRIOV E-Switch support" 48 bool "Mellanox Technologies MLX5 SRIOV E-Switch support"
49 depends on MLX5_CORE_EN 49 depends on MLX5_CORE_EN && NET_SWITCHDEV
50 default y 50 default y
51 ---help--- 51 ---help---
52 Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC. 52 Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index a87d46bc2299..37fd0245b6c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -409,6 +409,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
409 return mlx5e_ethtool_get_coalesce(priv, coal); 409 return mlx5e_ethtool_get_coalesce(priv, coal);
410} 410}
411 411
412#define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD
413#define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT
414
412static void 415static void
413mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) 416mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
414{ 417{
@@ -443,6 +446,20 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
443 if (!MLX5_CAP_GEN(mdev, cq_moderation)) 446 if (!MLX5_CAP_GEN(mdev, cq_moderation))
444 return -EOPNOTSUPP; 447 return -EOPNOTSUPP;
445 448
449 if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
450 coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) {
451 netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n",
452 __func__, MLX5E_MAX_COAL_TIME);
453 return -ERANGE;
454 }
455
456 if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES ||
457 coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) {
458 netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n",
459 __func__, MLX5E_MAX_COAL_FRAMES);
460 return -ERANGE;
461 }
462
446 mutex_lock(&priv->state_lock); 463 mutex_lock(&priv->state_lock);
447 new_channels.params = priv->channels.params; 464 new_channels.params = priv->channels.params;
448 465
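
The added checks reject out-of-range ethtool coalescing requests with -ERANGE before any driver state is touched. A small standalone sketch of the same validation shape; the numeric limits here are placeholders for the values the driver derives from MLX5_MAX_CQ_PERIOD and MLX5_MAX_CQ_COUNT.

```c
#include <errno.h>
#include <stdio.h>

/* Placeholder limits, not the real hardware caps. */
#define MAX_COAL_TIME_USECS	4095u
#define MAX_COAL_FRAMES		4095u

struct coalesce_req {
	unsigned int rx_usecs, tx_usecs;
	unsigned int rx_frames, tx_frames;
};

/* Validate before taking any locks or touching channel state. */
static int validate_coalesce(const struct coalesce_req *req)
{
	if (req->tx_usecs > MAX_COAL_TIME_USECS ||
	    req->rx_usecs > MAX_COAL_TIME_USECS) {
		fprintf(stderr, "coalesce time above %u usecs\n", MAX_COAL_TIME_USECS);
		return -ERANGE;
	}
	if (req->tx_frames > MAX_COAL_FRAMES ||
	    req->rx_frames > MAX_COAL_FRAMES) {
		fprintf(stderr, "coalesced frames above %u\n", MAX_COAL_FRAMES);
		return -ERANGE;
	}
	return 0;
}

int main(void)
{
	struct coalesce_req ok = { .rx_usecs = 16, .rx_frames = 32 };
	struct coalesce_req bad = { .tx_usecs = 1u << 20 };

	printf("ok:  %d\n", validate_coalesce(&ok));	/* 0 */
	printf("bad: %d\n", validate_coalesce(&bad));	/* -ERANGE */
	return 0;
}
```
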
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 0339609cfa56..c71f4f10283b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2702,6 +2702,9 @@ int mlx5e_open(struct net_device *netdev)
2702 mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); 2702 mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
2703 mutex_unlock(&priv->state_lock); 2703 mutex_unlock(&priv->state_lock);
2704 2704
2705 if (mlx5e_vxlan_allowed(priv->mdev))
2706 udp_tunnel_get_rx_info(netdev);
2707
2705 return err; 2708 return err;
2706} 2709}
2707 2710
@@ -4225,7 +4228,7 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
4225 } 4228 }
4226} 4229}
4227 4230
4228#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) 4231#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
4229static const struct switchdev_ops mlx5e_switchdev_ops = { 4232static const struct switchdev_ops mlx5e_switchdev_ops = {
4230 .switchdev_port_attr_get = mlx5e_attr_get, 4233 .switchdev_port_attr_get = mlx5e_attr_get,
4231}; 4234};
@@ -4334,7 +4337,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
4334 4337
4335 mlx5e_set_netdev_dev_addr(netdev); 4338 mlx5e_set_netdev_dev_addr(netdev);
4336 4339
4337#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) 4340#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
4338 if (MLX5_VPORT_MANAGER(mdev)) 4341 if (MLX5_VPORT_MANAGER(mdev))
4339 netdev->switchdev_ops = &mlx5e_switchdev_ops; 4342 netdev->switchdev_ops = &mlx5e_switchdev_ops;
4340#endif 4343#endif
@@ -4493,12 +4496,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
4493#ifdef CONFIG_MLX5_CORE_EN_DCB 4496#ifdef CONFIG_MLX5_CORE_EN_DCB
4494 mlx5e_dcbnl_init_app(priv); 4497 mlx5e_dcbnl_init_app(priv);
4495#endif 4498#endif
4496 /* Device already registered: sync netdev system state */
4497 if (mlx5e_vxlan_allowed(mdev)) {
4498 rtnl_lock();
4499 udp_tunnel_get_rx_info(netdev);
4500 rtnl_unlock();
4501 }
4502 4499
4503 queue_work(priv->wq, &priv->set_rx_mode_work); 4500 queue_work(priv->wq, &priv->set_rx_mode_work);
4504 4501
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 8e70fa9ef39a..d8f68e4d1018 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -44,6 +44,11 @@
44#include "en_tc.h" 44#include "en_tc.h"
45#include "fs_core.h" 45#include "fs_core.h"
46 46
47#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
48 max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
49#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \
50 max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)
51
47static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; 52static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
48 53
49static void mlx5e_rep_get_drvinfo(struct net_device *dev, 54static void mlx5e_rep_get_drvinfo(struct net_device *dev,
@@ -209,7 +214,7 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
209 214
210static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, 215static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
211 struct mlx5_eswitch_rep *rep, 216 struct mlx5_eswitch_rep *rep,
212 u16 *sqns_array, int sqns_num) 217 u32 *sqns_array, int sqns_num)
213{ 218{
214 struct mlx5_flow_handle *flow_rule; 219 struct mlx5_flow_handle *flow_rule;
215 struct mlx5e_rep_priv *rpriv; 220 struct mlx5e_rep_priv *rpriv;
@@ -255,9 +260,9 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
255 struct mlx5e_channel *c; 260 struct mlx5e_channel *c;
256 int n, tc, num_sqs = 0; 261 int n, tc, num_sqs = 0;
257 int err = -ENOMEM; 262 int err = -ENOMEM;
258 u16 *sqs; 263 u32 *sqs;
259 264
260 sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(u16), GFP_KERNEL); 265 sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
261 if (!sqs) 266 if (!sqs)
262 goto out; 267 goto out;
263 268
@@ -288,7 +293,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
288static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv) 293static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
289{ 294{
290#if IS_ENABLED(CONFIG_IPV6) 295#if IS_ENABLED(CONFIG_IPV6)
291 unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms, 296 unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
292 DELAY_PROBE_TIME); 297 DELAY_PROBE_TIME);
293#else 298#else
294 unsigned long ipv6_interval = ~0UL; 299 unsigned long ipv6_interval = ~0UL;
@@ -424,7 +429,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
424 case NETEVENT_NEIGH_UPDATE: 429 case NETEVENT_NEIGH_UPDATE:
425 n = ptr; 430 n = ptr;
426#if IS_ENABLED(CONFIG_IPV6) 431#if IS_ENABLED(CONFIG_IPV6)
427 if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl) 432 if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
428#else 433#else
429 if (n->tbl != &arp_tbl) 434 if (n->tbl != &arp_tbl)
430#endif 435#endif
@@ -472,7 +477,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
472 * done per device delay prob time parameter. 477 * done per device delay prob time parameter.
473 */ 478 */
474#if IS_ENABLED(CONFIG_IPV6) 479#if IS_ENABLED(CONFIG_IPV6)
475 if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl)) 480 if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
476#else 481#else
477 if (!p->dev || p->tbl != &arp_tbl) 482 if (!p->dev || p->tbl != &arp_tbl)
478#endif 483#endif
@@ -668,7 +673,6 @@ static int mlx5e_rep_open(struct net_device *dev)
668 struct mlx5e_priv *priv = netdev_priv(dev); 673 struct mlx5e_priv *priv = netdev_priv(dev);
669 struct mlx5e_rep_priv *rpriv = priv->ppriv; 674 struct mlx5e_rep_priv *rpriv = priv->ppriv;
670 struct mlx5_eswitch_rep *rep = rpriv->rep; 675 struct mlx5_eswitch_rep *rep = rpriv->rep;
671 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
672 int err; 676 int err;
673 677
674 mutex_lock(&priv->state_lock); 678 mutex_lock(&priv->state_lock);
@@ -676,8 +680,9 @@ static int mlx5e_rep_open(struct net_device *dev)
676 if (err) 680 if (err)
677 goto unlock; 681 goto unlock;
678 682
679 if (!mlx5_eswitch_set_vport_state(esw, rep->vport, 683 if (!mlx5_modify_vport_admin_state(priv->mdev,
680 MLX5_ESW_VPORT_ADMIN_STATE_UP)) 684 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
685 rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
681 netif_carrier_on(dev); 686 netif_carrier_on(dev);
682 687
683unlock: 688unlock:
@@ -690,11 +695,12 @@ static int mlx5e_rep_close(struct net_device *dev)
690 struct mlx5e_priv *priv = netdev_priv(dev); 695 struct mlx5e_priv *priv = netdev_priv(dev);
691 struct mlx5e_rep_priv *rpriv = priv->ppriv; 696 struct mlx5e_rep_priv *rpriv = priv->ppriv;
692 struct mlx5_eswitch_rep *rep = rpriv->rep; 697 struct mlx5_eswitch_rep *rep = rpriv->rep;
693 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
694 int ret; 698 int ret;
695 699
696 mutex_lock(&priv->state_lock); 700 mutex_lock(&priv->state_lock);
697 (void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); 701 mlx5_modify_vport_admin_state(priv->mdev,
702 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
703 rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
698 ret = mlx5e_close_locked(dev); 704 ret = mlx5e_close_locked(dev);
699 mutex_unlock(&priv->state_lock); 705 mutex_unlock(&priv->state_lock);
700 return ret; 706 return ret;
@@ -878,9 +884,9 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
878 MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 884 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
879 885
880 params->hard_mtu = MLX5E_ETH_HARD_MTU; 886 params->hard_mtu = MLX5E_ETH_HARD_MTU;
881 params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; 887 params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
882 params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; 888 params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
883 params->log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; 889 params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
884 890
885 params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); 891 params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
886 mlx5e_set_rx_cq_mode_params(params, cq_period_mode); 892 mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
@@ -899,9 +905,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
899 905
900 netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; 906 netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
901 907
902#ifdef CONFIG_NET_SWITCHDEV
903 netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; 908 netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
904#endif
905 909
906 netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; 910 netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
907 netdev->hw_features |= NETIF_F_HW_TC; 911 netdev->hw_features |= NETIF_F_HW_TC;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3e4a7e81b67f..4197001f9801 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -964,7 +964,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
964 tbl = &arp_tbl; 964 tbl = &arp_tbl;
965#if IS_ENABLED(CONFIG_IPV6) 965#if IS_ENABLED(CONFIG_IPV6)
966 else if (m_neigh->family == AF_INET6) 966 else if (m_neigh->family == AF_INET6)
967 tbl = ipv6_stub->nd_tbl; 967 tbl = &nd_tbl;
968#endif 968#endif
969 else 969 else
970 return; 970 return;
@@ -2614,19 +2614,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
2614 if (err != -EAGAIN) 2614 if (err != -EAGAIN)
2615 flow->flags |= MLX5E_TC_FLOW_OFFLOADED; 2615 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
2616 2616
2617 if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
2618 !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
2619 kvfree(parse_attr);
2620
2617 err = rhashtable_insert_fast(&tc->ht, &flow->node, 2621 err = rhashtable_insert_fast(&tc->ht, &flow->node,
2618 tc->ht_params); 2622 tc->ht_params);
2619 if (err) 2623 if (err) {
2620 goto err_del_rule; 2624 mlx5e_tc_del_flow(priv, flow);
2625 kfree(flow);
2626 }
2621 2627
2622 if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
2623 !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
2624 kvfree(parse_attr);
2625 return err; 2628 return err;
2626 2629
2627err_del_rule:
2628 mlx5e_tc_del_flow(priv, flow);
2629
2630err_free: 2630err_free:
2631 kvfree(parse_attr); 2631 kvfree(parse_attr);
2632 kfree(flow); 2632 kfree(flow);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index a9ccd974c620..1904c0323d39 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1406,6 +1406,55 @@ mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1406 decap_fib_entry); 1406 decap_fib_entry);
1407} 1407}
1408 1408
1409static int
1410mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
1411 struct mlxsw_sp_vr *ul_vr, bool enable)
1412{
1413 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1414 struct mlxsw_sp_rif *rif = &lb_rif->common;
1415 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1416 char ritr_pl[MLXSW_REG_RITR_LEN];
1417 u32 saddr4;
1418
1419 switch (lb_cf.ul_protocol) {
1420 case MLXSW_SP_L3_PROTO_IPV4:
1421 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1422 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1423 rif->rif_index, rif->vr_id, rif->dev->mtu);
1424 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1425 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
1426 ul_vr->id, saddr4, lb_cf.okey);
1427 break;
1428
1429 case MLXSW_SP_L3_PROTO_IPV6:
1430 return -EAFNOSUPPORT;
1431 }
1432
1433 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1434}
1435
1436static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1437 struct net_device *ol_dev)
1438{
1439 struct mlxsw_sp_ipip_entry *ipip_entry;
1440 struct mlxsw_sp_rif_ipip_lb *lb_rif;
1441 struct mlxsw_sp_vr *ul_vr;
1442 int err = 0;
1443
1444 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1445 if (ipip_entry) {
1446 lb_rif = ipip_entry->ol_lb;
1447 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
1448 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
1449 if (err)
1450 goto out;
1451 lb_rif->common.mtu = ol_dev->mtu;
1452 }
1453
1454out:
1455 return err;
1456}
1457
1409static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp, 1458static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1410 struct net_device *ol_dev) 1459 struct net_device *ol_dev)
1411{ 1460{
@@ -1686,6 +1735,8 @@ int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1686 extack = info->extack; 1735 extack = info->extack;
1687 return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp, 1736 return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1688 ol_dev, extack); 1737 ol_dev, extack);
1738 case NETDEV_CHANGEMTU:
1739 return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1689 } 1740 }
1690 return 0; 1741 return 0;
1691} 1742}
@@ -6910,33 +6961,6 @@ mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
6910} 6961}
6911 6962
6912static int 6963static int
6913mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
6914 struct mlxsw_sp_vr *ul_vr, bool enable)
6915{
6916 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
6917 struct mlxsw_sp_rif *rif = &lb_rif->common;
6918 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6919 char ritr_pl[MLXSW_REG_RITR_LEN];
6920 u32 saddr4;
6921
6922 switch (lb_cf.ul_protocol) {
6923 case MLXSW_SP_L3_PROTO_IPV4:
6924 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
6925 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
6926 rif->rif_index, rif->vr_id, rif->dev->mtu);
6927 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
6928 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
6929 ul_vr->id, saddr4, lb_cf.okey);
6930 break;
6931
6932 case MLXSW_SP_L3_PROTO_IPV6:
6933 return -EAFNOSUPPORT;
6934 }
6935
6936 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6937}
6938
6939static int
6940mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif) 6964mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
6941{ 6965{
6942 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); 6966 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 4b631e26f199..29b4e5f8c102 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -74,7 +74,9 @@ nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
74 74
75static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn) 75static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
76{ 76{
77 if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) { 77 if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) {
78 pr_warn("instruction limit reached (%u NFP instructions)\n",
79 nfp_prog->prog_len);
78 nfp_prog->error = -ENOSPC; 80 nfp_prog->error = -ENOSPC;
79 return; 81 return;
80 } 82 }
@@ -2732,6 +2734,8 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
2732 err = cb(nfp_prog, meta); 2734 err = cb(nfp_prog, meta);
2733 if (err) 2735 if (err)
2734 return err; 2736 return err;
2737 if (nfp_prog->error)
2738 return nfp_prog->error;
2735 2739
2736 nfp_prog->n_translated++; 2740 nfp_prog->n_translated++;
2737 } 2741 }
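
Two related fixes in the NFP JIT above: the full-buffer check now compares like units (the allocation size in bytes divided by sizeof(u64) against the instruction count), and the translation loop returns as soon as the latched error is set rather than continuing past the end of the buffer. A standalone sketch of that pattern with invented names:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical emitter: prog[] holds 64-bit instructions, alloc_len is in bytes. */
struct emitter {
	uint64_t *prog;
	size_t alloc_len;	/* bytes */
	size_t prog_len;	/* instructions */
	int error;		/* latched, checked by the caller's loop */
};

static void emit_push(struct emitter *e, uint64_t insn)
{
	/* Compare like units: capacity in instructions vs. instructions used. */
	if (e->alloc_len / sizeof(uint64_t) == e->prog_len) {
		fprintf(stderr, "instruction limit reached (%zu instructions)\n",
			e->prog_len);
		e->error = -1;	/* stand-in for -ENOSPC */
		return;
	}
	e->prog[e->prog_len++] = insn;
}

static int translate(struct emitter *e, const uint64_t *in, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		emit_push(e, in[i]);
		/* Stop at the first latched error instead of emitting more. */
		if (e->error)
			return e->error;
	}
	return 0;
}

int main(void)
{
	uint64_t buf[4];
	uint64_t input[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	struct emitter e = { .prog = buf, .alloc_len = sizeof(buf) };

	printf("translate: %d, emitted %zu\n", translate(&e, input, 8), e.prog_len);
	return 0;
}
```
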
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index dafc079ab6b9..14941303189d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -320,13 +320,11 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
320 barrier(); 320 barrier();
321 writel(txq->tx_db.raw, txq->doorbell_addr); 321 writel(txq->tx_db.raw, txq->doorbell_addr);
322 322
323 /* mmiowb is needed to synchronize doorbell writes from more than one 323 /* Fence required to flush the write combined buffer, since another
324 * processor. It guarantees that the write arrives to the device before 324 * CPU may write to the same doorbell address and data may be lost
325 * the queue lock is released and another start_xmit is called (possibly 325 * due to relaxed order nature of write combined bar.
326 * on another CPU). Without this barrier, the next doorbell can bypass
327 * this doorbell. This is applicable to IA64/Altix systems.
328 */ 326 */
329 mmiowb(); 327 wmb();
330} 328}
331 329
332static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp, 330static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
@@ -1249,16 +1247,10 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
1249 1247
1250 csum_flag = qede_check_csum(parse_flag); 1248 csum_flag = qede_check_csum(parse_flag);
1251 if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { 1249 if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
1252 if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) { 1250 if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag))
1253 rxq->rx_ip_frags++; 1251 rxq->rx_ip_frags++;
1254 } else { 1252 else
1255 DP_NOTICE(edev,
1256 "CQE has error, flags = %x, dropping incoming packet\n",
1257 parse_flag);
1258 rxq->rx_hw_errors++; 1253 rxq->rx_hw_errors++;
1259 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
1260 return 0;
1261 }
1262 } 1254 }
1263 1255
1264 /* Basic validation passed; Need to prepare an SKB. This would also 1256 /* Basic validation passed; Need to prepare an SKB. This would also
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 630409e0337f..604ae78381ae 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8378,12 +8378,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8378 if (!tp->counters) 8378 if (!tp->counters)
8379 return -ENOMEM; 8379 return -ENOMEM;
8380 8380
8381 pci_set_drvdata(pdev, dev);
8382
8381 rc = register_netdev(dev); 8383 rc = register_netdev(dev);
8382 if (rc < 0) 8384 if (rc < 0)
8383 return rc; 8385 return rc;
8384 8386
8385 pci_set_drvdata(pdev, dev);
8386
8387 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", 8387 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
8388 rtl_chip_infos[chipset].name, tp->mmio_addr, dev->dev_addr, 8388 rtl_chip_infos[chipset].name, tp->mmio_addr, dev->dev_addr,
8389 (u32)(RTL_R32(tp, TxConfig) & 0x9cf0f8ff), 8389 (u32)(RTL_R32(tp, TxConfig) & 0x9cf0f8ff),
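
The r8169 change is purely an ordering fix: the driver's private data must be attached to the PCI device before register_netdev(), because netdev callbacks can run as soon as registration completes and expect the drvdata to already be there. A minimal standalone illustration of the "publish context first, then register" rule; all names are made up.

```c
#include <stdio.h>

struct device { void *drvdata; };

static void set_drvdata(struct device *dev, void *data) { dev->drvdata = data; }

/* A registration routine that, like register_netdev(), may invoke callbacks
 * before it even returns.
 */
static int register_thing(struct device *dev, void (*open)(struct device *))
{
	open(dev);		/* fires immediately in this model */
	return 0;
}

static void my_open(struct device *dev)
{
	/* Would see a NULL drvdata here if it were set only after registration. */
	printf("open: drvdata=%p\n", dev->drvdata);
}

int main(void)
{
	struct device dev = { 0 };
	int priv = 42;

	set_drvdata(&dev, &priv);		/* publish context first ... */
	return register_thing(&dev, my_open);	/* ... then register */
}
```
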
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index e2b68d9328a7..6b127be781d9 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -863,7 +863,7 @@ static void rndis_set_multicast(struct work_struct *w)
863 if (flags & IFF_PROMISC) { 863 if (flags & IFF_PROMISC) {
864 filter = NDIS_PACKET_TYPE_PROMISCUOUS; 864 filter = NDIS_PACKET_TYPE_PROMISCUOUS;
865 } else { 865 } else {
866 if (flags & IFF_ALLMULTI) 866 if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
867 filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; 867 filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
868 if (flags & IFF_BROADCAST) 868 if (flags & IFF_BROADCAST)
869 filter |= NDIS_PACKET_TYPE_BROADCAST; 869 filter |= NDIS_PACKET_TYPE_BROADCAST;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 222093e878a8..a6c6ce19eeee 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1203,11 +1203,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1203 goto err_dev_open; 1203 goto err_dev_open;
1204 } 1204 }
1205 1205
1206 netif_addr_lock_bh(dev);
1207 dev_uc_sync_multiple(port_dev, dev);
1208 dev_mc_sync_multiple(port_dev, dev);
1209 netif_addr_unlock_bh(dev);
1210
1211 err = vlan_vids_add_by_dev(port_dev, dev); 1206 err = vlan_vids_add_by_dev(port_dev, dev);
1212 if (err) { 1207 if (err) {
1213 netdev_err(dev, "Failed to add vlan ids to device %s\n", 1208 netdev_err(dev, "Failed to add vlan ids to device %s\n",
@@ -1247,6 +1242,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1247 goto err_option_port_add; 1242 goto err_option_port_add;
1248 } 1243 }
1249 1244
1245 netif_addr_lock_bh(dev);
1246 dev_uc_sync_multiple(port_dev, dev);
1247 dev_mc_sync_multiple(port_dev, dev);
1248 netif_addr_unlock_bh(dev);
1249
1250 port->index = -1; 1250 port->index = -1;
1251 list_add_tail_rcu(&port->list, &team->port_list); 1251 list_add_tail_rcu(&port->list, &team->port_list);
1252 team_port_enable(team, port); 1252 team_port_enable(team, port);
@@ -1271,8 +1271,6 @@ err_enable_netpoll:
1271 vlan_vids_del_by_dev(port_dev, dev); 1271 vlan_vids_del_by_dev(port_dev, dev);
1272 1272
1273err_vids_add: 1273err_vids_add:
1274 dev_uc_unsync(port_dev, dev);
1275 dev_mc_unsync(port_dev, dev);
1276 dev_close(port_dev); 1274 dev_close(port_dev);
1277 1275
1278err_dev_open: 1276err_dev_open:
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 60a604cc7647..55a78eb96961 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2351,6 +2351,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
2351 u32 buf; 2351 u32 buf;
2352 int ret = 0; 2352 int ret = 0;
2353 unsigned long timeout; 2353 unsigned long timeout;
2354 u8 sig;
2354 2355
2355 ret = lan78xx_read_reg(dev, HW_CFG, &buf); 2356 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2356 buf |= HW_CFG_LRST_; 2357 buf |= HW_CFG_LRST_;
@@ -2450,6 +2451,15 @@ static int lan78xx_reset(struct lan78xx_net *dev)
2450 /* LAN7801 only has RGMII mode */ 2451 /* LAN7801 only has RGMII mode */
2451 if (dev->chipid == ID_REV_CHIP_ID_7801_) 2452 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2452 buf &= ~MAC_CR_GMII_EN_; 2453 buf &= ~MAC_CR_GMII_EN_;
2454
2455 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2456 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2457 if (!ret && sig != EEPROM_INDICATOR) {
2458 /* Implies there is no external eeprom. Set mac speed */
2459 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2460 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2461 }
2462 }
2453 ret = lan78xx_write_reg(dev, MAC_CR, buf); 2463 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2454 2464
2455 ret = lan78xx_read_reg(dev, MAC_TX, &buf); 2465 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
@@ -2863,8 +2873,7 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2863 if (ret < 0) { 2873 if (ret < 0) {
2864 netdev_warn(dev->net, 2874 netdev_warn(dev->net,
2865 "lan78xx_setup_irq_domain() failed : %d", ret); 2875 "lan78xx_setup_irq_domain() failed : %d", ret);
2866 kfree(pdata); 2876 goto out1;
2867 return ret;
2868 } 2877 }
2869 2878
2870 dev->net->hard_header_len += TX_OVERHEAD; 2879 dev->net->hard_header_len += TX_OVERHEAD;
@@ -2872,14 +2881,32 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2872 2881
2873 /* Init all registers */ 2882 /* Init all registers */
2874 ret = lan78xx_reset(dev); 2883 ret = lan78xx_reset(dev);
2884 if (ret) {
2885 netdev_warn(dev->net, "Registers INIT FAILED....");
2886 goto out2;
2887 }
2875 2888
2876 ret = lan78xx_mdio_init(dev); 2889 ret = lan78xx_mdio_init(dev);
2890 if (ret) {
2891 netdev_warn(dev->net, "MDIO INIT FAILED.....");
2892 goto out2;
2893 }
2877 2894
2878 dev->net->flags |= IFF_MULTICAST; 2895 dev->net->flags |= IFF_MULTICAST;
2879 2896
2880 pdata->wol = WAKE_MAGIC; 2897 pdata->wol = WAKE_MAGIC;
2881 2898
2882 return ret; 2899 return ret;
2900
2901out2:
2902 lan78xx_remove_irq_domain(dev);
2903
2904out1:
2905 netdev_warn(dev->net, "Bind routine FAILED");
2906 cancel_work_sync(&pdata->set_multicast);
2907 cancel_work_sync(&pdata->set_vlan);
2908 kfree(pdata);
2909 return ret;
2883} 2910}
2884 2911
2885static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) 2912static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
@@ -2891,6 +2918,8 @@ static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2891 lan78xx_remove_mdio(dev); 2918 lan78xx_remove_mdio(dev);
2892 2919
2893 if (pdata) { 2920 if (pdata) {
2921 cancel_work_sync(&pdata->set_multicast);
2922 cancel_work_sync(&pdata->set_vlan);
2894 netif_dbg(dev, ifdown, dev->net, "free pdata"); 2923 netif_dbg(dev, ifdown, dev->net, "free pdata");
2895 kfree(pdata); 2924 kfree(pdata);
2896 pdata = NULL; 2925 pdata = NULL;
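
The reworked lan78xx_bind() error handling above follows the usual kernel goto-unwind shape: each later failure jumps to a label that tears down everything set up so far, in reverse order, with labels falling through into the earlier ones. A compact standalone sketch of that structure; the step names are invented.

```c
#include <stdio.h>

static int init_irq(void)   { puts("irq up");    return 0; }
static void exit_irq(void)  { puts("irq down"); }
static int init_reset(void) { puts("reset ok");  return 0; }
static int init_mdio(void)  { puts("mdio fail"); return -1; } /* simulated failure */

static int bind(void)
{
	int ret;

	ret = init_irq();
	if (ret)
		goto out1;

	ret = init_reset();
	if (ret)
		goto out2;

	ret = init_mdio();
	if (ret)
		goto out2;

	return 0;

out2:
	exit_irq();	/* undo in reverse order of setup, then fall through */
out1:
	fprintf(stderr, "bind failed: %d\n", ret);
	return ret;
}

int main(void)
{
	return bind() ? 1 : 0;
}
```
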
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 76ac48095c29..ca066b785e9f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1104,6 +1104,9 @@ static const struct usb_device_id products[] = {
1104 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, 1104 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
1105 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 1105 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
1106 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 1106 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
1107 {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
1108 {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
1109 {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
1107 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ 1110 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
1108 {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ 1111 {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
1109 {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ 1112 {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
@@ -1180,6 +1183,7 @@ static const struct usb_device_id products[] = {
1180 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 1183 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
1181 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ 1184 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
1182 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ 1185 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
1186 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
1183 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 1187 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
1184 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 1188 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
1185 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ 1189 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
@@ -1240,6 +1244,7 @@ static const struct usb_device_id products[] = {
1240 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ 1244 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
1241 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ 1245 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
1242 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ 1246 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
1247 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
1243 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ 1248 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
1244 {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ 1249 {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
1245 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ 1250 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 102582459bef..0a2b180d138a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -578,12 +578,13 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
578 if (!IS_ERR(neigh)) { 578 if (!IS_ERR(neigh)) {
579 sock_confirm_neigh(skb, neigh); 579 sock_confirm_neigh(skb, neigh);
580 ret = neigh_output(neigh, skb); 580 ret = neigh_output(neigh, skb);
581 rcu_read_unlock_bh();
582 return ret;
581 } 583 }
582 584
583 rcu_read_unlock_bh(); 585 rcu_read_unlock_bh();
584err: 586err:
585 if (unlikely(ret < 0)) 587 vrf_tx_error(skb->dev, skb);
586 vrf_tx_error(skb->dev, skb);
587 return ret; 588 return ret;
588} 589}
589 590
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
index c32399faff57..90c274490181 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
@@ -124,7 +124,7 @@ static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
124 EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c), 124 EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c),
125}; 125};
126 126
127const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { 127static const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = {
128 { 128 {
129 /* pin-controller instance 0 data */ 129 /* pin-controller instance 0 data */
130 .pin_banks = s5pv210_pin_bank, 130 .pin_banks = s5pv210_pin_bank,
@@ -137,6 +137,11 @@ const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = {
137 }, 137 },
138}; 138};
139 139
140const struct samsung_pinctrl_of_match_data s5pv210_of_data __initconst = {
141 .ctrl = s5pv210_pin_ctrl,
142 .num_ctrl = ARRAY_SIZE(s5pv210_pin_ctrl),
143};
144
140/* Pad retention control code for accessing PMU regmap */ 145/* Pad retention control code for accessing PMU regmap */
141static atomic_t exynos_shared_retention_refcnt; 146static atomic_t exynos_shared_retention_refcnt;
142 147
@@ -199,7 +204,7 @@ static const struct samsung_retention_data exynos3250_retention_data __initconst
199 * Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes 204 * Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes
200 * two gpio/pin-mux/pinconfig controllers. 205 * two gpio/pin-mux/pinconfig controllers.
201 */ 206 */
202const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { 207static const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = {
203 { 208 {
204 /* pin-controller instance 0 data */ 209 /* pin-controller instance 0 data */
205 .pin_banks = exynos3250_pin_banks0, 210 .pin_banks = exynos3250_pin_banks0,
@@ -220,6 +225,11 @@ const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = {
220 }, 225 },
221}; 226};
222 227
228const struct samsung_pinctrl_of_match_data exynos3250_of_data __initconst = {
229 .ctrl = exynos3250_pin_ctrl,
230 .num_ctrl = ARRAY_SIZE(exynos3250_pin_ctrl),
231};
232
223/* pin banks of exynos4210 pin-controller 0 */ 233/* pin banks of exynos4210 pin-controller 0 */
224static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = { 234static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = {
225 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 235 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -303,7 +313,7 @@ static const struct samsung_retention_data exynos4_audio_retention_data __initco
303 * Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes 313 * Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes
304 * three gpio/pin-mux/pinconfig controllers. 314 * three gpio/pin-mux/pinconfig controllers.
305 */ 315 */
306const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { 316static const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = {
307 { 317 {
308 /* pin-controller instance 0 data */ 318 /* pin-controller instance 0 data */
309 .pin_banks = exynos4210_pin_banks0, 319 .pin_banks = exynos4210_pin_banks0,
@@ -329,6 +339,11 @@ const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = {
329 }, 339 },
330}; 340};
331 341
342const struct samsung_pinctrl_of_match_data exynos4210_of_data __initconst = {
343 .ctrl = exynos4210_pin_ctrl,
344 .num_ctrl = ARRAY_SIZE(exynos4210_pin_ctrl),
345};
346
332/* pin banks of exynos4x12 pin-controller 0 */ 347/* pin banks of exynos4x12 pin-controller 0 */
333static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = { 348static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = {
334 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 349 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -391,7 +406,7 @@ static const struct samsung_pin_bank_data exynos4x12_pin_banks3[] __initconst =
391 * Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes 406 * Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes
392 * four gpio/pin-mux/pinconfig controllers. 407 * four gpio/pin-mux/pinconfig controllers.
393 */ 408 */
394const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { 409static const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
395 { 410 {
396 /* pin-controller instance 0 data */ 411 /* pin-controller instance 0 data */
397 .pin_banks = exynos4x12_pin_banks0, 412 .pin_banks = exynos4x12_pin_banks0,
@@ -427,6 +442,11 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
427 }, 442 },
428}; 443};
429 444
445const struct samsung_pinctrl_of_match_data exynos4x12_of_data __initconst = {
446 .ctrl = exynos4x12_pin_ctrl,
447 .num_ctrl = ARRAY_SIZE(exynos4x12_pin_ctrl),
448};
449
430/* pin banks of exynos5250 pin-controller 0 */ 450/* pin banks of exynos5250 pin-controller 0 */
431static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = { 451static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = {
432 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 452 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -487,7 +507,7 @@ static const struct samsung_pin_bank_data exynos5250_pin_banks3[] __initconst =
487 * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes 507 * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes
488 * four gpio/pin-mux/pinconfig controllers. 508 * four gpio/pin-mux/pinconfig controllers.
489 */ 509 */
490const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { 510static const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {
491 { 511 {
492 /* pin-controller instance 0 data */ 512 /* pin-controller instance 0 data */
493 .pin_banks = exynos5250_pin_banks0, 513 .pin_banks = exynos5250_pin_banks0,
@@ -523,6 +543,11 @@ const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {
523 }, 543 },
524}; 544};
525 545
546const struct samsung_pinctrl_of_match_data exynos5250_of_data __initconst = {
547 .ctrl = exynos5250_pin_ctrl,
548 .num_ctrl = ARRAY_SIZE(exynos5250_pin_ctrl),
549};
550
526/* pin banks of exynos5260 pin-controller 0 */ 551/* pin banks of exynos5260 pin-controller 0 */
527static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = { 552static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = {
528 EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00), 553 EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00),
@@ -567,7 +592,7 @@ static const struct samsung_pin_bank_data exynos5260_pin_banks2[] __initconst =
567 * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes 592 * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes
568 * three gpio/pin-mux/pinconfig controllers. 593 * three gpio/pin-mux/pinconfig controllers.
569 */ 594 */
570const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { 595static const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = {
571 { 596 {
572 /* pin-controller instance 0 data */ 597 /* pin-controller instance 0 data */
573 .pin_banks = exynos5260_pin_banks0, 598 .pin_banks = exynos5260_pin_banks0,
@@ -587,6 +612,11 @@ const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = {
587 }, 612 },
588}; 613};
589 614
615const struct samsung_pinctrl_of_match_data exynos5260_of_data __initconst = {
616 .ctrl = exynos5260_pin_ctrl,
617 .num_ctrl = ARRAY_SIZE(exynos5260_pin_ctrl),
618};
619
590/* pin banks of exynos5410 pin-controller 0 */ 620/* pin banks of exynos5410 pin-controller 0 */
591static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = { 621static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = {
592 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 622 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -657,7 +687,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks3[] __initconst =
657 * Samsung pinctrl driver data for Exynos5410 SoC. Exynos5410 SoC includes 687 * Samsung pinctrl driver data for Exynos5410 SoC. Exynos5410 SoC includes
658 * four gpio/pin-mux/pinconfig controllers. 688 * four gpio/pin-mux/pinconfig controllers.
659 */ 689 */
660const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { 690static const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = {
661 { 691 {
662 /* pin-controller instance 0 data */ 692 /* pin-controller instance 0 data */
663 .pin_banks = exynos5410_pin_banks0, 693 .pin_banks = exynos5410_pin_banks0,
@@ -690,6 +720,11 @@ const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = {
690 }, 720 },
691}; 721};
692 722
723const struct samsung_pinctrl_of_match_data exynos5410_of_data __initconst = {
724 .ctrl = exynos5410_pin_ctrl,
725 .num_ctrl = ARRAY_SIZE(exynos5410_pin_ctrl),
726};
727
693/* pin banks of exynos5420 pin-controller 0 */ 728/* pin banks of exynos5420 pin-controller 0 */
694static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = { 729static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = {
695 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00), 730 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00),
@@ -774,7 +809,7 @@ static const struct samsung_retention_data exynos5420_retention_data __initconst
774 * Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes 809 * Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes
775 * four gpio/pin-mux/pinconfig controllers. 810 * four gpio/pin-mux/pinconfig controllers.
776 */ 811 */
777const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { 812static const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
778 { 813 {
779 /* pin-controller instance 0 data */ 814 /* pin-controller instance 0 data */
780 .pin_banks = exynos5420_pin_banks0, 815 .pin_banks = exynos5420_pin_banks0,
@@ -808,3 +843,8 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
808 .retention_data = &exynos4_audio_retention_data, 843 .retention_data = &exynos4_audio_retention_data,
809 }, 844 },
810}; 845};
846
847const struct samsung_pinctrl_of_match_data exynos5420_of_data __initconst = {
848 .ctrl = exynos5420_pin_ctrl,
849 .num_ctrl = ARRAY_SIZE(exynos5420_pin_ctrl),
850};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index fc8f7833bec0..71c9d1d9f345 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -175,7 +175,7 @@ static const struct samsung_retention_data exynos5433_fsys_retention_data __init
175 * Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes 175 * Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes
176 * ten gpio/pin-mux/pinconfig controllers. 176 * ten gpio/pin-mux/pinconfig controllers.
177 */ 177 */
178const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { 178static const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = {
179 { 179 {
180 /* pin-controller instance 0 data */ 180 /* pin-controller instance 0 data */
181 .pin_banks = exynos5433_pin_banks0, 181 .pin_banks = exynos5433_pin_banks0,
@@ -260,6 +260,11 @@ const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = {
260 }, 260 },
261}; 261};
262 262
263const struct samsung_pinctrl_of_match_data exynos5433_of_data __initconst = {
264 .ctrl = exynos5433_pin_ctrl,
265 .num_ctrl = ARRAY_SIZE(exynos5433_pin_ctrl),
266};
267
263/* pin banks of exynos7 pin-controller - ALIVE */ 268/* pin banks of exynos7 pin-controller - ALIVE */
264static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = { 269static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = {
265 EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), 270 EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
@@ -339,7 +344,7 @@ static const struct samsung_pin_bank_data exynos7_pin_banks9[] __initconst = {
339 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), 344 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
340}; 345};
341 346
342const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { 347static const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = {
343 { 348 {
344 /* pin-controller instance 0 Alive data */ 349 /* pin-controller instance 0 Alive data */
345 .pin_banks = exynos7_pin_banks0, 350 .pin_banks = exynos7_pin_banks0,
@@ -392,3 +397,8 @@ const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = {
392 .eint_gpio_init = exynos_eint_gpio_init, 397 .eint_gpio_init = exynos_eint_gpio_init,
393 }, 398 },
394}; 399};
400
401const struct samsung_pinctrl_of_match_data exynos7_of_data __initconst = {
402 .ctrl = exynos7_pin_ctrl,
403 .num_ctrl = ARRAY_SIZE(exynos7_pin_ctrl),
404};
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 10187cb0e9b9..7e824e4d20f4 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -565,7 +565,7 @@ static const struct samsung_pin_bank_data s3c2412_pin_banks[] __initconst = {
565 PIN_BANK_2BIT(13, 0x080, "gpj"), 565 PIN_BANK_2BIT(13, 0x080, "gpj"),
566}; 566};
567 567
568const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { 568static const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = {
569 { 569 {
570 .pin_banks = s3c2412_pin_banks, 570 .pin_banks = s3c2412_pin_banks,
571 .nr_banks = ARRAY_SIZE(s3c2412_pin_banks), 571 .nr_banks = ARRAY_SIZE(s3c2412_pin_banks),
@@ -573,6 +573,11 @@ const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = {
573 }, 573 },
574}; 574};
575 575
576const struct samsung_pinctrl_of_match_data s3c2412_of_data __initconst = {
577 .ctrl = s3c2412_pin_ctrl,
578 .num_ctrl = ARRAY_SIZE(s3c2412_pin_ctrl),
579};
580
576static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = { 581static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = {
577 PIN_BANK_A(27, 0x000, "gpa"), 582 PIN_BANK_A(27, 0x000, "gpa"),
578 PIN_BANK_2BIT(11, 0x010, "gpb"), 583 PIN_BANK_2BIT(11, 0x010, "gpb"),
@@ -587,7 +592,7 @@ static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = {
587 PIN_BANK_2BIT(2, 0x100, "gpm"), 592 PIN_BANK_2BIT(2, 0x100, "gpm"),
588}; 593};
589 594
590const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { 595static const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = {
591 { 596 {
592 .pin_banks = s3c2416_pin_banks, 597 .pin_banks = s3c2416_pin_banks,
593 .nr_banks = ARRAY_SIZE(s3c2416_pin_banks), 598 .nr_banks = ARRAY_SIZE(s3c2416_pin_banks),
@@ -595,6 +600,11 @@ const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = {
595 }, 600 },
596}; 601};
597 602
603const struct samsung_pinctrl_of_match_data s3c2416_of_data __initconst = {
604 .ctrl = s3c2416_pin_ctrl,
605 .num_ctrl = ARRAY_SIZE(s3c2416_pin_ctrl),
606};
607
598static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = { 608static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = {
599 PIN_BANK_A(25, 0x000, "gpa"), 609 PIN_BANK_A(25, 0x000, "gpa"),
600 PIN_BANK_2BIT(11, 0x010, "gpb"), 610 PIN_BANK_2BIT(11, 0x010, "gpb"),
@@ -607,7 +617,7 @@ static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = {
607 PIN_BANK_2BIT(13, 0x0d0, "gpj"), 617 PIN_BANK_2BIT(13, 0x0d0, "gpj"),
608}; 618};
609 619
610const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { 620static const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = {
611 { 621 {
612 .pin_banks = s3c2440_pin_banks, 622 .pin_banks = s3c2440_pin_banks,
613 .nr_banks = ARRAY_SIZE(s3c2440_pin_banks), 623 .nr_banks = ARRAY_SIZE(s3c2440_pin_banks),
@@ -615,6 +625,11 @@ const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = {
615 }, 625 },
616}; 626};
617 627
628const struct samsung_pinctrl_of_match_data s3c2440_of_data __initconst = {
629 .ctrl = s3c2440_pin_ctrl,
630 .num_ctrl = ARRAY_SIZE(s3c2440_pin_ctrl),
631};
632
618static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = { 633static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = {
619 PIN_BANK_A(28, 0x000, "gpa"), 634 PIN_BANK_A(28, 0x000, "gpa"),
620 PIN_BANK_2BIT(11, 0x010, "gpb"), 635 PIN_BANK_2BIT(11, 0x010, "gpb"),
@@ -630,10 +645,15 @@ static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = {
630 PIN_BANK_2BIT(2, 0x100, "gpm"), 645 PIN_BANK_2BIT(2, 0x100, "gpm"),
631}; 646};
632 647
633const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = { 648static const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = {
634 { 649 {
635 .pin_banks = s3c2450_pin_banks, 650 .pin_banks = s3c2450_pin_banks,
636 .nr_banks = ARRAY_SIZE(s3c2450_pin_banks), 651 .nr_banks = ARRAY_SIZE(s3c2450_pin_banks),
637 .eint_wkup_init = s3c24xx_eint_init, 652 .eint_wkup_init = s3c24xx_eint_init,
638 }, 653 },
639}; 654};
655
656const struct samsung_pinctrl_of_match_data s3c2450_of_data __initconst = {
657 .ctrl = s3c2450_pin_ctrl,
658 .num_ctrl = ARRAY_SIZE(s3c2450_pin_ctrl),
659};
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index 679628ac4b31..288e6567ceb1 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -789,7 +789,7 @@ static const struct samsung_pin_bank_data s3c64xx_pin_banks0[] __initconst = {
789 * Samsung pinctrl driver data for S3C64xx SoC. S3C64xx SoC includes 789 * Samsung pinctrl driver data for S3C64xx SoC. S3C64xx SoC includes
790 * one gpio/pin-mux/pinconfig controller. 790 * one gpio/pin-mux/pinconfig controller.
791 */ 791 */
792const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { 792static const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = {
793 { 793 {
794 /* pin-controller instance 1 data */ 794 /* pin-controller instance 1 data */
795 .pin_banks = s3c64xx_pin_banks0, 795 .pin_banks = s3c64xx_pin_banks0,
@@ -798,3 +798,8 @@ const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = {
798 .eint_wkup_init = s3c64xx_eint_eint0_init, 798 .eint_wkup_init = s3c64xx_eint_eint0_init,
799 }, 799 },
800}; 800};
801
802const struct samsung_pinctrl_of_match_data s3c64xx_of_data __initconst = {
803 .ctrl = s3c64xx_pin_ctrl,
804 .num_ctrl = ARRAY_SIZE(s3c64xx_pin_ctrl),
805};
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index da58e4554137..336e88d7bdb9 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -942,12 +942,33 @@ static int samsung_gpiolib_register(struct platform_device *pdev,
942 return 0; 942 return 0;
943} 943}
944 944
945static const struct samsung_pin_ctrl *
946samsung_pinctrl_get_soc_data_for_of_alias(struct platform_device *pdev)
947{
948 struct device_node *node = pdev->dev.of_node;
949 const struct samsung_pinctrl_of_match_data *of_data;
950 int id;
951
952 id = of_alias_get_id(node, "pinctrl");
953 if (id < 0) {
954 dev_err(&pdev->dev, "failed to get alias id\n");
955 return NULL;
956 }
957
958 of_data = of_device_get_match_data(&pdev->dev);
959 if (id >= of_data->num_ctrl) {
960 dev_err(&pdev->dev, "invalid alias id %d\n", id);
961 return NULL;
962 }
963
964 return &(of_data->ctrl[id]);
965}
966
945/* retrieve the soc specific data */ 967/* retrieve the soc specific data */
946static const struct samsung_pin_ctrl * 968static const struct samsung_pin_ctrl *
947samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, 969samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
948 struct platform_device *pdev) 970 struct platform_device *pdev)
949{ 971{
950 int id;
951 struct device_node *node = pdev->dev.of_node; 972 struct device_node *node = pdev->dev.of_node;
952 struct device_node *np; 973 struct device_node *np;
953 const struct samsung_pin_bank_data *bdata; 974 const struct samsung_pin_bank_data *bdata;
@@ -957,13 +978,9 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
957 void __iomem *virt_base[SAMSUNG_PINCTRL_NUM_RESOURCES]; 978 void __iomem *virt_base[SAMSUNG_PINCTRL_NUM_RESOURCES];
958 unsigned int i; 979 unsigned int i;
959 980
960 id = of_alias_get_id(node, "pinctrl"); 981 ctrl = samsung_pinctrl_get_soc_data_for_of_alias(pdev);
961 if (id < 0) { 982 if (!ctrl)
962 dev_err(&pdev->dev, "failed to get alias id\n");
963 return ERR_PTR(-ENOENT); 983 return ERR_PTR(-ENOENT);
964 }
965 ctrl = of_device_get_match_data(&pdev->dev);
966 ctrl += id;
967 984
968 d->suspend = ctrl->suspend; 985 d->suspend = ctrl->suspend;
969 d->resume = ctrl->resume; 986 d->resume = ctrl->resume;
@@ -1188,41 +1205,41 @@ static int __maybe_unused samsung_pinctrl_resume(struct device *dev)
1188static const struct of_device_id samsung_pinctrl_dt_match[] = { 1205static const struct of_device_id samsung_pinctrl_dt_match[] = {
1189#ifdef CONFIG_PINCTRL_EXYNOS_ARM 1206#ifdef CONFIG_PINCTRL_EXYNOS_ARM
1190 { .compatible = "samsung,exynos3250-pinctrl", 1207 { .compatible = "samsung,exynos3250-pinctrl",
1191 .data = exynos3250_pin_ctrl }, 1208 .data = &exynos3250_of_data },
1192 { .compatible = "samsung,exynos4210-pinctrl", 1209 { .compatible = "samsung,exynos4210-pinctrl",
1193 .data = exynos4210_pin_ctrl }, 1210 .data = &exynos4210_of_data },
1194 { .compatible = "samsung,exynos4x12-pinctrl", 1211 { .compatible = "samsung,exynos4x12-pinctrl",
1195 .data = exynos4x12_pin_ctrl }, 1212 .data = &exynos4x12_of_data },
1196 { .compatible = "samsung,exynos5250-pinctrl", 1213 { .compatible = "samsung,exynos5250-pinctrl",
1197 .data = exynos5250_pin_ctrl }, 1214 .data = &exynos5250_of_data },
1198 { .compatible = "samsung,exynos5260-pinctrl", 1215 { .compatible = "samsung,exynos5260-pinctrl",
1199 .data = exynos5260_pin_ctrl }, 1216 .data = &exynos5260_of_data },
1200 { .compatible = "samsung,exynos5410-pinctrl", 1217 { .compatible = "samsung,exynos5410-pinctrl",
1201 .data = exynos5410_pin_ctrl }, 1218 .data = &exynos5410_of_data },
1202 { .compatible = "samsung,exynos5420-pinctrl", 1219 { .compatible = "samsung,exynos5420-pinctrl",
1203 .data = exynos5420_pin_ctrl }, 1220 .data = &exynos5420_of_data },
1204 { .compatible = "samsung,s5pv210-pinctrl", 1221 { .compatible = "samsung,s5pv210-pinctrl",
1205 .data = s5pv210_pin_ctrl }, 1222 .data = &s5pv210_of_data },
1206#endif 1223#endif
1207#ifdef CONFIG_PINCTRL_EXYNOS_ARM64 1224#ifdef CONFIG_PINCTRL_EXYNOS_ARM64
1208 { .compatible = "samsung,exynos5433-pinctrl", 1225 { .compatible = "samsung,exynos5433-pinctrl",
1209 .data = exynos5433_pin_ctrl }, 1226 .data = &exynos5433_of_data },
1210 { .compatible = "samsung,exynos7-pinctrl", 1227 { .compatible = "samsung,exynos7-pinctrl",
1211 .data = exynos7_pin_ctrl }, 1228 .data = &exynos7_of_data },
1212#endif 1229#endif
1213#ifdef CONFIG_PINCTRL_S3C64XX 1230#ifdef CONFIG_PINCTRL_S3C64XX
1214 { .compatible = "samsung,s3c64xx-pinctrl", 1231 { .compatible = "samsung,s3c64xx-pinctrl",
1215 .data = s3c64xx_pin_ctrl }, 1232 .data = &s3c64xx_of_data },
1216#endif 1233#endif
1217#ifdef CONFIG_PINCTRL_S3C24XX 1234#ifdef CONFIG_PINCTRL_S3C24XX
1218 { .compatible = "samsung,s3c2412-pinctrl", 1235 { .compatible = "samsung,s3c2412-pinctrl",
1219 .data = s3c2412_pin_ctrl }, 1236 .data = &s3c2412_of_data },
1220 { .compatible = "samsung,s3c2416-pinctrl", 1237 { .compatible = "samsung,s3c2416-pinctrl",
1221 .data = s3c2416_pin_ctrl }, 1238 .data = &s3c2416_of_data },
1222 { .compatible = "samsung,s3c2440-pinctrl", 1239 { .compatible = "samsung,s3c2440-pinctrl",
1223 .data = s3c2440_pin_ctrl }, 1240 .data = &s3c2440_of_data },
1224 { .compatible = "samsung,s3c2450-pinctrl", 1241 { .compatible = "samsung,s3c2450-pinctrl",
1225 .data = s3c2450_pin_ctrl }, 1242 .data = &s3c2450_of_data },
1226#endif 1243#endif
1227 {}, 1244 {},
1228}; 1245};
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index e204f609823b..f0cda9424dfe 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -282,6 +282,16 @@ struct samsung_pinctrl_drv_data {
282}; 282};
283 283
284/** 284/**
285 * struct samsung_pinctrl_of_match_data: OF match device specific configuration data.
286 * @ctrl: array of pin controller data.
287 * @num_ctrl: size of array @ctrl.
288 */
289struct samsung_pinctrl_of_match_data {
290 const struct samsung_pin_ctrl *ctrl;
291 unsigned int num_ctrl;
292};
293
294/**
285 * struct samsung_pin_group: represent group of pins of a pinmux function. 295 * struct samsung_pin_group: represent group of pins of a pinmux function.
286 * @name: name of the pin group, used to lookup the group. 296 * @name: name of the pin group, used to lookup the group.
287 * @pins: the pins included in this group. 297 * @pins: the pins included in this group.
@@ -309,20 +319,20 @@ struct samsung_pmx_func {
309}; 319};
310 320
311/* list of all exported SoC specific data */ 321/* list of all exported SoC specific data */
312extern const struct samsung_pin_ctrl exynos3250_pin_ctrl[]; 322extern const struct samsung_pinctrl_of_match_data exynos3250_of_data;
313extern const struct samsung_pin_ctrl exynos4210_pin_ctrl[]; 323extern const struct samsung_pinctrl_of_match_data exynos4210_of_data;
314extern const struct samsung_pin_ctrl exynos4x12_pin_ctrl[]; 324extern const struct samsung_pinctrl_of_match_data exynos4x12_of_data;
315extern const struct samsung_pin_ctrl exynos5250_pin_ctrl[]; 325extern const struct samsung_pinctrl_of_match_data exynos5250_of_data;
316extern const struct samsung_pin_ctrl exynos5260_pin_ctrl[]; 326extern const struct samsung_pinctrl_of_match_data exynos5260_of_data;
317extern const struct samsung_pin_ctrl exynos5410_pin_ctrl[]; 327extern const struct samsung_pinctrl_of_match_data exynos5410_of_data;
318extern const struct samsung_pin_ctrl exynos5420_pin_ctrl[]; 328extern const struct samsung_pinctrl_of_match_data exynos5420_of_data;
319extern const struct samsung_pin_ctrl exynos5433_pin_ctrl[]; 329extern const struct samsung_pinctrl_of_match_data exynos5433_of_data;
320extern const struct samsung_pin_ctrl exynos7_pin_ctrl[]; 330extern const struct samsung_pinctrl_of_match_data exynos7_of_data;
321extern const struct samsung_pin_ctrl s3c64xx_pin_ctrl[]; 331extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data;
322extern const struct samsung_pin_ctrl s3c2412_pin_ctrl[]; 332extern const struct samsung_pinctrl_of_match_data s3c2412_of_data;
323extern const struct samsung_pin_ctrl s3c2416_pin_ctrl[]; 333extern const struct samsung_pinctrl_of_match_data s3c2416_of_data;
324extern const struct samsung_pin_ctrl s3c2440_pin_ctrl[]; 334extern const struct samsung_pinctrl_of_match_data s3c2440_of_data;
325extern const struct samsung_pin_ctrl s3c2450_pin_ctrl[]; 335extern const struct samsung_pinctrl_of_match_data s3c2450_of_data;
326extern const struct samsung_pin_ctrl s5pv210_pin_ctrl[]; 336extern const struct samsung_pinctrl_of_match_data s5pv210_of_data;
327 337
328#endif /* __PINCTRL_SAMSUNG_H */ 338#endif /* __PINCTRL_SAMSUNG_H */
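
The samsung pinctrl hunks above replace the bare samsung_pin_ctrl arrays in the OF match table with a samsung_pinctrl_of_match_data wrapper that also records num_ctrl, so the alias id read from the device tree can be range-checked before it is used as an array index. The sketch below shows the shape such a bounds-checked lookup can take; the helper name is taken from the hunk, but its body here is an assumption, since the real implementation lies outside the quoted context.

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hedged, kernel-context sketch: resolve the per-instance pin controller
 * data via the "pinctrl" alias, refusing alias ids outside the array that
 * the wrapper struct now describes. */
static const struct samsung_pin_ctrl *
samsung_pinctrl_get_soc_data_for_of_alias(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct samsung_pinctrl_of_match_data *of_data;
	int id;

	id = of_alias_get_id(node, "pinctrl");
	if (id < 0) {
		dev_err(&pdev->dev, "failed to get alias id\n");
		return NULL;
	}

	of_data = of_device_get_match_data(&pdev->dev);
	if (id >= of_data->num_ctrl) {
		dev_err(&pdev->dev, "invalid alias id %d\n", id);
		return NULL;
	}

	return &of_data->ctrl[id];
}
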
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 18aeee592fdc..35951e7b89d2 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -1538,7 +1538,6 @@ static const struct sh_pfc_pin pinmux_pins[] = {
1538 SH_PFC_PIN_NAMED_CFG('B', 18, AVB_TD1, CFG_FLAGS), 1538 SH_PFC_PIN_NAMED_CFG('B', 18, AVB_TD1, CFG_FLAGS),
1539 SH_PFC_PIN_NAMED_CFG('B', 19, AVB_RXC, CFG_FLAGS), 1539 SH_PFC_PIN_NAMED_CFG('B', 19, AVB_RXC, CFG_FLAGS),
1540 SH_PFC_PIN_NAMED_CFG('C', 1, PRESETOUT#, CFG_FLAGS), 1540 SH_PFC_PIN_NAMED_CFG('C', 1, PRESETOUT#, CFG_FLAGS),
1541 SH_PFC_PIN_NAMED_CFG('F', 1, CLKOUT, CFG_FLAGS),
1542 SH_PFC_PIN_NAMED_CFG('H', 37, MLB_REF, CFG_FLAGS), 1541 SH_PFC_PIN_NAMED_CFG('H', 37, MLB_REF, CFG_FLAGS),
1543 SH_PFC_PIN_NAMED_CFG('V', 3, QSPI1_SPCLK, CFG_FLAGS), 1542 SH_PFC_PIN_NAMED_CFG('V', 3, QSPI1_SPCLK, CFG_FLAGS),
1544 SH_PFC_PIN_NAMED_CFG('V', 5, QSPI1_SSL, CFG_FLAGS), 1543 SH_PFC_PIN_NAMED_CFG('V', 5, QSPI1_SSL, CFG_FLAGS),
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index dd9464920456..ef22b275d050 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -474,6 +474,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
474 shost->dma_boundary = 0xffffffff; 474 shost->dma_boundary = 0xffffffff;
475 475
476 shost->use_blk_mq = scsi_use_blk_mq; 476 shost->use_blk_mq = scsi_use_blk_mq;
477 shost->use_blk_mq = scsi_use_blk_mq || shost->hostt->force_blk_mq;
477 478
478 device_initialize(&shost->shost_gendev); 479 device_initialize(&shost->shost_gendev);
479 dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); 480 dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 5293e6827ce5..3a9eca163db8 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1045,11 +1045,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
1045 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); 1045 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
1046 if (unlikely(!h->msix_vectors)) 1046 if (unlikely(!h->msix_vectors))
1047 return; 1047 return;
1048 if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) 1048 c->Header.ReplyQueue = reply_queue;
1049 c->Header.ReplyQueue =
1050 raw_smp_processor_id() % h->nreply_queues;
1051 else
1052 c->Header.ReplyQueue = reply_queue % h->nreply_queues;
1053 } 1049 }
1054} 1050}
1055 1051
@@ -1063,10 +1059,7 @@ static void set_ioaccel1_performant_mode(struct ctlr_info *h,
1063 * Tell the controller to post the reply to the queue for this 1059 * Tell the controller to post the reply to the queue for this
1064 * processor. This seems to give the best I/O throughput. 1060 * processor. This seems to give the best I/O throughput.
1065 */ 1061 */
1066 if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) 1062 cp->ReplyQueue = reply_queue;
1067 cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
1068 else
1069 cp->ReplyQueue = reply_queue % h->nreply_queues;
1070 /* 1063 /*
1071 * Set the bits in the address sent down to include: 1064 * Set the bits in the address sent down to include:
1072 * - performant mode bit (bit 0) 1065 * - performant mode bit (bit 0)
@@ -1087,10 +1080,7 @@ static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
1087 /* Tell the controller to post the reply to the queue for this 1080 /* Tell the controller to post the reply to the queue for this
1088 * processor. This seems to give the best I/O throughput. 1081 * processor. This seems to give the best I/O throughput.
1089 */ 1082 */
1090 if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) 1083 cp->reply_queue = reply_queue;
1091 cp->reply_queue = smp_processor_id() % h->nreply_queues;
1092 else
1093 cp->reply_queue = reply_queue % h->nreply_queues;
1094 /* Set the bits in the address sent down to include: 1084 /* Set the bits in the address sent down to include:
1095 * - performant mode bit not used in ioaccel mode 2 1085 * - performant mode bit not used in ioaccel mode 2
1096 * - pull count (bits 0-3) 1086 * - pull count (bits 0-3)
@@ -1109,10 +1099,7 @@ static void set_ioaccel2_performant_mode(struct ctlr_info *h,
1109 * Tell the controller to post the reply to the queue for this 1099 * Tell the controller to post the reply to the queue for this
1110 * processor. This seems to give the best I/O throughput. 1100 * processor. This seems to give the best I/O throughput.
1111 */ 1101 */
1112 if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) 1102 cp->reply_queue = reply_queue;
1113 cp->reply_queue = smp_processor_id() % h->nreply_queues;
1114 else
1115 cp->reply_queue = reply_queue % h->nreply_queues;
1116 /* 1103 /*
1117 * Set the bits in the address sent down to include: 1104 * Set the bits in the address sent down to include:
1118 * - performant mode bit not used in ioaccel mode 2 1105 * - performant mode bit not used in ioaccel mode 2
@@ -1157,6 +1144,8 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1157{ 1144{
1158 dial_down_lockup_detection_during_fw_flash(h, c); 1145 dial_down_lockup_detection_during_fw_flash(h, c);
1159 atomic_inc(&h->commands_outstanding); 1146 atomic_inc(&h->commands_outstanding);
1147
1148 reply_queue = h->reply_map[raw_smp_processor_id()];
1160 switch (c->cmd_type) { 1149 switch (c->cmd_type) {
1161 case CMD_IOACCEL1: 1150 case CMD_IOACCEL1:
1162 set_ioaccel1_performant_mode(h, c, reply_queue); 1151 set_ioaccel1_performant_mode(h, c, reply_queue);
@@ -7376,6 +7365,26 @@ static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7376 h->msix_vectors = 0; 7365 h->msix_vectors = 0;
7377} 7366}
7378 7367
7368static void hpsa_setup_reply_map(struct ctlr_info *h)
7369{
7370 const struct cpumask *mask;
7371 unsigned int queue, cpu;
7372
7373 for (queue = 0; queue < h->msix_vectors; queue++) {
7374 mask = pci_irq_get_affinity(h->pdev, queue);
7375 if (!mask)
7376 goto fallback;
7377
7378 for_each_cpu(cpu, mask)
7379 h->reply_map[cpu] = queue;
7380 }
7381 return;
7382
7383fallback:
7384 for_each_possible_cpu(cpu)
7385 h->reply_map[cpu] = 0;
7386}
7387
7379/* If MSI/MSI-X is supported by the kernel we will try to enable it on 7388/* If MSI/MSI-X is supported by the kernel we will try to enable it on
7380 * controllers that are capable. If not, we use legacy INTx mode. 7389 * controllers that are capable. If not, we use legacy INTx mode.
7381 */ 7390 */
@@ -7771,6 +7780,10 @@ static int hpsa_pci_init(struct ctlr_info *h)
7771 err = hpsa_interrupt_mode(h); 7780 err = hpsa_interrupt_mode(h);
7772 if (err) 7781 if (err)
7773 goto clean1; 7782 goto clean1;
7783
7784 /* setup mapping between CPU and reply queue */
7785 hpsa_setup_reply_map(h);
7786
7774 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 7787 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7775 if (err) 7788 if (err)
7776 goto clean2; /* intmode+region, pci */ 7789 goto clean2; /* intmode+region, pci */
@@ -8480,6 +8493,28 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8480 return wq; 8493 return wq;
8481} 8494}
8482 8495
8496static void hpda_free_ctlr_info(struct ctlr_info *h)
8497{
8498 kfree(h->reply_map);
8499 kfree(h);
8500}
8501
8502static struct ctlr_info *hpda_alloc_ctlr_info(void)
8503{
8504 struct ctlr_info *h;
8505
8506 h = kzalloc(sizeof(*h), GFP_KERNEL);
8507 if (!h)
8508 return NULL;
8509
8510 h->reply_map = kzalloc(sizeof(*h->reply_map) * nr_cpu_ids, GFP_KERNEL);
8511 if (!h->reply_map) {
8512 kfree(h);
8513 return NULL;
8514 }
8515 return h;
8516}
8517
8483static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 8518static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8484{ 8519{
8485 int dac, rc; 8520 int dac, rc;
@@ -8517,7 +8552,7 @@ reinit_after_soft_reset:
8517 * the driver. See comments in hpsa.h for more info. 8552 * the driver. See comments in hpsa.h for more info.
8518 */ 8553 */
8519 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); 8554 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8520 h = kzalloc(sizeof(*h), GFP_KERNEL); 8555 h = hpda_alloc_ctlr_info();
8521 if (!h) { 8556 if (!h) {
8522 dev_err(&pdev->dev, "Failed to allocate controller head\n"); 8557 dev_err(&pdev->dev, "Failed to allocate controller head\n");
8523 return -ENOMEM; 8558 return -ENOMEM;
@@ -8916,7 +8951,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
8916 h->lockup_detected = NULL; /* init_one 2 */ 8951 h->lockup_detected = NULL; /* init_one 2 */
8917 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */ 8952 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
8918 8953
8919 kfree(h); /* init_one 1 */ 8954 hpda_free_ctlr_info(h); /* init_one 1 */
8920} 8955}
8921 8956
8922static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, 8957static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 018f980a701c..fb9f5e7f8209 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -158,6 +158,7 @@ struct bmic_controller_parameters {
158#pragma pack() 158#pragma pack()
159 159
160struct ctlr_info { 160struct ctlr_info {
161 unsigned int *reply_map;
161 int ctlr; 162 int ctlr;
162 char devname[8]; 163 char devname[8];
163 char *product_name; 164 char *product_name;
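
The hpsa changes above (and the megaraid_sas hunks further down) stop deriving the reply queue from raw_smp_processor_id() % nreply_queues at submission time. Instead they pre-compute a per-CPU reply_map from pci_irq_get_affinity(), so each command is posted to an MSI-X vector whose affinity mask actually covers the submitting CPU, with a fallback when no affinity information is available. A generic, kernel-context sketch of that setup loop follows; reply_map and nr_queues stand in for the driver-specific fields, and it is not meant as a drop-in for either driver.

#include <linux/cpumask.h>
#include <linux/pci.h>

static void setup_reply_map(struct pci_dev *pdev, unsigned int *reply_map,
			    unsigned int nr_queues)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	for (queue = 0; queue < nr_queues; queue++) {
		mask = pci_irq_get_affinity(pdev, queue);
		if (!mask)
			goto fallback;

		for_each_cpu(cpu, mask)
			reply_map[cpu] = queue;
	}
	return;

fallback:
	/* No affinity data: spread CPUs across queues as megaraid_sas does,
	 * or map everything to queue 0 as hpsa does. */
	for_each_possible_cpu(cpu)
		reply_map[cpu] = nr_queues ? cpu % nr_queues : 0;
}
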
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b1b1d3a3b173..daefe8172b04 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3579,11 +3579,9 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
3579static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad, 3579static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
3580 struct ibmvfc_target *tgt) 3580 struct ibmvfc_target *tgt)
3581{ 3581{
3582 if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name, 3582 if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
3583 sizeof(tgt->ids.port_name)))
3584 return 1; 3583 return 1;
3585 if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name, 3584 if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
3586 sizeof(tgt->ids.node_name)))
3587 return 1; 3585 return 1;
3588 if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id) 3586 if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
3589 return 1; 3587 return 1;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0ad00dbf912d..6d886b13dbe9 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -37,6 +37,7 @@
37#include <linux/kfifo.h> 37#include <linux/kfifo.h>
38#include <linux/scatterlist.h> 38#include <linux/scatterlist.h>
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/backing-dev.h>
40#include <net/tcp.h> 41#include <net/tcp.h>
41#include <scsi/scsi_cmnd.h> 42#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_device.h> 43#include <scsi/scsi_device.h>
@@ -954,6 +955,13 @@ static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev)
954 955
955static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev) 956static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
956{ 957{
958 struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host);
959 struct iscsi_session *session = tcp_sw_host->session;
960 struct iscsi_conn *conn = session->leadconn;
961
962 if (conn->datadgst_en)
963 sdev->request_queue->backing_dev_info->capabilities
964 |= BDI_CAP_STABLE_WRITES;
957 blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY); 965 blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
958 blk_queue_dma_alignment(sdev->request_queue, 0); 966 blk_queue_dma_alignment(sdev->request_queue, 0);
959 return 0; 967 return 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index ba6503f37756..27fab8235ea5 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2128,6 +2128,7 @@ enum MR_PD_TYPE {
2128 2128
2129struct megasas_instance { 2129struct megasas_instance {
2130 2130
2131 unsigned int *reply_map;
2131 __le32 *producer; 2132 __le32 *producer;
2132 dma_addr_t producer_h; 2133 dma_addr_t producer_h;
2133 __le32 *consumer; 2134 __le32 *consumer;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index a71ee67df084..dde0798b8a91 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5165,6 +5165,26 @@ skip_alloc:
5165 instance->use_seqnum_jbod_fp = false; 5165 instance->use_seqnum_jbod_fp = false;
5166} 5166}
5167 5167
5168static void megasas_setup_reply_map(struct megasas_instance *instance)
5169{
5170 const struct cpumask *mask;
5171 unsigned int queue, cpu;
5172
5173 for (queue = 0; queue < instance->msix_vectors; queue++) {
5174 mask = pci_irq_get_affinity(instance->pdev, queue);
5175 if (!mask)
5176 goto fallback;
5177
5178 for_each_cpu(cpu, mask)
5179 instance->reply_map[cpu] = queue;
5180 }
5181 return;
5182
5183fallback:
5184 for_each_possible_cpu(cpu)
5185 instance->reply_map[cpu] = cpu % instance->msix_vectors;
5186}
5187
5168/** 5188/**
5169 * megasas_init_fw - Initializes the FW 5189 * megasas_init_fw - Initializes the FW
5170 * @instance: Adapter soft state 5190 * @instance: Adapter soft state
@@ -5343,6 +5363,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
5343 goto fail_setup_irqs; 5363 goto fail_setup_irqs;
5344 } 5364 }
5345 5365
5366 megasas_setup_reply_map(instance);
5367
5346 dev_info(&instance->pdev->dev, 5368 dev_info(&instance->pdev->dev,
5347 "firmware supports msix\t: (%d)", fw_msix_count); 5369 "firmware supports msix\t: (%d)", fw_msix_count);
5348 dev_info(&instance->pdev->dev, 5370 dev_info(&instance->pdev->dev,
@@ -6123,20 +6145,29 @@ static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6123 */ 6145 */
6124static int megasas_alloc_ctrl_mem(struct megasas_instance *instance) 6146static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6125{ 6147{
6148 instance->reply_map = kzalloc(sizeof(unsigned int) * nr_cpu_ids,
6149 GFP_KERNEL);
6150 if (!instance->reply_map)
6151 return -ENOMEM;
6152
6126 switch (instance->adapter_type) { 6153 switch (instance->adapter_type) {
6127 case MFI_SERIES: 6154 case MFI_SERIES:
6128 if (megasas_alloc_mfi_ctrl_mem(instance)) 6155 if (megasas_alloc_mfi_ctrl_mem(instance))
6129 return -ENOMEM; 6156 goto fail;
6130 break; 6157 break;
6131 case VENTURA_SERIES: 6158 case VENTURA_SERIES:
6132 case THUNDERBOLT_SERIES: 6159 case THUNDERBOLT_SERIES:
6133 case INVADER_SERIES: 6160 case INVADER_SERIES:
6134 if (megasas_alloc_fusion_context(instance)) 6161 if (megasas_alloc_fusion_context(instance))
6135 return -ENOMEM; 6162 goto fail;
6136 break; 6163 break;
6137 } 6164 }
6138 6165
6139 return 0; 6166 return 0;
6167 fail:
6168 kfree(instance->reply_map);
6169 instance->reply_map = NULL;
6170 return -ENOMEM;
6140} 6171}
6141 6172
6142/* 6173/*
@@ -6148,6 +6179,7 @@ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6148 */ 6179 */
6149static inline void megasas_free_ctrl_mem(struct megasas_instance *instance) 6180static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6150{ 6181{
6182 kfree(instance->reply_map);
6151 if (instance->adapter_type == MFI_SERIES) { 6183 if (instance->adapter_type == MFI_SERIES) {
6152 if (instance->producer) 6184 if (instance->producer)
6153 pci_free_consistent(instance->pdev, sizeof(u32), 6185 pci_free_consistent(instance->pdev, sizeof(u32),
@@ -6540,7 +6572,6 @@ fail_io_attach:
6540 pci_free_irq_vectors(instance->pdev); 6572 pci_free_irq_vectors(instance->pdev);
6541fail_init_mfi: 6573fail_init_mfi:
6542 scsi_host_put(host); 6574 scsi_host_put(host);
6543
6544fail_alloc_instance: 6575fail_alloc_instance:
6545 pci_disable_device(pdev); 6576 pci_disable_device(pdev);
6546 6577
@@ -6746,6 +6777,8 @@ megasas_resume(struct pci_dev *pdev)
6746 if (rval < 0) 6777 if (rval < 0)
6747 goto fail_reenable_msix; 6778 goto fail_reenable_msix;
6748 6779
6780 megasas_setup_reply_map(instance);
6781
6749 if (instance->adapter_type != MFI_SERIES) { 6782 if (instance->adapter_type != MFI_SERIES) {
6750 megasas_reset_reply_desc(instance); 6783 megasas_reset_reply_desc(instance);
6751 if (megasas_ioc_init_fusion(instance)) { 6784 if (megasas_ioc_init_fusion(instance)) {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index dc8e850fbfd2..5ec3b74e8aed 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2641,11 +2641,8 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2641 fp_possible = (io_info.fpOkForIo > 0) ? true : false; 2641 fp_possible = (io_info.fpOkForIo > 0) ? true : false;
2642 } 2642 }
2643 2643
2644 /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU 2644 cmd->request_desc->SCSIIO.MSIxIndex =
2645 id by default, not CPU group id, otherwise all MSI-X queues won't 2645 instance->reply_map[raw_smp_processor_id()];
2646 be utilized */
2647 cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
2648 raw_smp_processor_id() % instance->msix_vectors : 0;
2649 2646
2650 praid_context = &io_request->RaidContext; 2647 praid_context = &io_request->RaidContext;
2651 2648
@@ -2971,10 +2968,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
2971 } 2968 }
2972 2969
2973 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; 2970 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
2974 cmd->request_desc->SCSIIO.MSIxIndex =
2975 instance->msix_vectors ?
2976 (raw_smp_processor_id() % instance->msix_vectors) : 0;
2977 2971
2972 cmd->request_desc->SCSIIO.MSIxIndex =
2973 instance->reply_map[raw_smp_processor_id()];
2978 2974
2979 if (!fp_possible) { 2975 if (!fp_possible) {
2980 /* system pd firmware path */ 2976 /* system pd firmware path */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3541caf3fceb..1fa84d6a0f8b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2484,6 +2484,8 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
2484 sector_size = old_sector_size; 2484 sector_size = old_sector_size;
2485 goto got_data; 2485 goto got_data;
2486 } 2486 }
2487 /* Remember that READ CAPACITY(16) succeeded */
2488 sdp->try_rc_10_first = 0;
2487 } 2489 }
2488 } 2490 }
2489 2491
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 7c28e8d4955a..45d04631888a 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -91,9 +91,6 @@ struct virtio_scsi_vq {
91struct virtio_scsi_target_state { 91struct virtio_scsi_target_state {
92 seqcount_t tgt_seq; 92 seqcount_t tgt_seq;
93 93
94 /* Count of outstanding requests. */
95 atomic_t reqs;
96
97 /* Currently active virtqueue for requests sent to this target. */ 94 /* Currently active virtqueue for requests sent to this target. */
98 struct virtio_scsi_vq *req_vq; 95 struct virtio_scsi_vq *req_vq;
99}; 96};
@@ -152,8 +149,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
152 struct virtio_scsi_cmd *cmd = buf; 149 struct virtio_scsi_cmd *cmd = buf;
153 struct scsi_cmnd *sc = cmd->sc; 150 struct scsi_cmnd *sc = cmd->sc;
154 struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd; 151 struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
155 struct virtio_scsi_target_state *tgt =
156 scsi_target(sc->device)->hostdata;
157 152
158 dev_dbg(&sc->device->sdev_gendev, 153 dev_dbg(&sc->device->sdev_gendev,
159 "cmd %p response %u status %#02x sense_len %u\n", 154 "cmd %p response %u status %#02x sense_len %u\n",
@@ -210,8 +205,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
210 } 205 }
211 206
212 sc->scsi_done(sc); 207 sc->scsi_done(sc);
213
214 atomic_dec(&tgt->reqs);
215} 208}
216 209
217static void virtscsi_vq_done(struct virtio_scsi *vscsi, 210static void virtscsi_vq_done(struct virtio_scsi *vscsi,
@@ -529,11 +522,20 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
529} 522}
530#endif 523#endif
531 524
532static int virtscsi_queuecommand(struct virtio_scsi *vscsi, 525static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
533 struct virtio_scsi_vq *req_vq, 526 struct scsi_cmnd *sc)
527{
528 u32 tag = blk_mq_unique_tag(sc->request);
529 u16 hwq = blk_mq_unique_tag_to_hwq(tag);
530
531 return &vscsi->req_vqs[hwq];
532}
533
534static int virtscsi_queuecommand(struct Scsi_Host *shost,
534 struct scsi_cmnd *sc) 535 struct scsi_cmnd *sc)
535{ 536{
536 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); 537 struct virtio_scsi *vscsi = shost_priv(shost);
538 struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
537 struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); 539 struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
538 unsigned long flags; 540 unsigned long flags;
539 int req_size; 541 int req_size;
@@ -576,79 +578,6 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
576 return 0; 578 return 0;
577} 579}
578 580
579static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
580 struct scsi_cmnd *sc)
581{
582 struct virtio_scsi *vscsi = shost_priv(sh);
583 struct virtio_scsi_target_state *tgt =
584 scsi_target(sc->device)->hostdata;
585
586 atomic_inc(&tgt->reqs);
587 return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
588}
589
590static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
591 struct scsi_cmnd *sc)
592{
593 u32 tag = blk_mq_unique_tag(sc->request);
594 u16 hwq = blk_mq_unique_tag_to_hwq(tag);
595
596 return &vscsi->req_vqs[hwq];
597}
598
599static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
600 struct virtio_scsi_target_state *tgt)
601{
602 struct virtio_scsi_vq *vq;
603 unsigned long flags;
604 u32 queue_num;
605
606 local_irq_save(flags);
607 if (atomic_inc_return(&tgt->reqs) > 1) {
608 unsigned long seq;
609
610 do {
611 seq = read_seqcount_begin(&tgt->tgt_seq);
612 vq = tgt->req_vq;
613 } while (read_seqcount_retry(&tgt->tgt_seq, seq));
614 } else {
615 /* no writes can be concurrent because of atomic_t */
616 write_seqcount_begin(&tgt->tgt_seq);
617
618 /* keep previous req_vq if a reader just arrived */
619 if (unlikely(atomic_read(&tgt->reqs) > 1)) {
620 vq = tgt->req_vq;
621 goto unlock;
622 }
623
624 queue_num = smp_processor_id();
625 while (unlikely(queue_num >= vscsi->num_queues))
626 queue_num -= vscsi->num_queues;
627 tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
628 unlock:
629 write_seqcount_end(&tgt->tgt_seq);
630 }
631 local_irq_restore(flags);
632
633 return vq;
634}
635
636static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
637 struct scsi_cmnd *sc)
638{
639 struct virtio_scsi *vscsi = shost_priv(sh);
640 struct virtio_scsi_target_state *tgt =
641 scsi_target(sc->device)->hostdata;
642 struct virtio_scsi_vq *req_vq;
643
644 if (shost_use_blk_mq(sh))
645 req_vq = virtscsi_pick_vq_mq(vscsi, sc);
646 else
647 req_vq = virtscsi_pick_vq(vscsi, tgt);
648
649 return virtscsi_queuecommand(vscsi, req_vq, sc);
650}
651
652static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) 581static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
653{ 582{
654 DECLARE_COMPLETION_ONSTACK(comp); 583 DECLARE_COMPLETION_ONSTACK(comp);
@@ -775,7 +704,6 @@ static int virtscsi_target_alloc(struct scsi_target *starget)
775 return -ENOMEM; 704 return -ENOMEM;
776 705
777 seqcount_init(&tgt->tgt_seq); 706 seqcount_init(&tgt->tgt_seq);
778 atomic_set(&tgt->reqs, 0);
779 tgt->req_vq = &vscsi->req_vqs[0]; 707 tgt->req_vq = &vscsi->req_vqs[0];
780 708
781 starget->hostdata = tgt; 709 starget->hostdata = tgt;
@@ -805,33 +733,13 @@ static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
805 return BLK_EH_RESET_TIMER; 733 return BLK_EH_RESET_TIMER;
806} 734}
807 735
808static struct scsi_host_template virtscsi_host_template_single = { 736static struct scsi_host_template virtscsi_host_template = {
809 .module = THIS_MODULE,
810 .name = "Virtio SCSI HBA",
811 .proc_name = "virtio_scsi",
812 .this_id = -1,
813 .cmd_size = sizeof(struct virtio_scsi_cmd),
814 .queuecommand = virtscsi_queuecommand_single,
815 .change_queue_depth = virtscsi_change_queue_depth,
816 .eh_abort_handler = virtscsi_abort,
817 .eh_device_reset_handler = virtscsi_device_reset,
818 .eh_timed_out = virtscsi_eh_timed_out,
819 .slave_alloc = virtscsi_device_alloc,
820
821 .dma_boundary = UINT_MAX,
822 .use_clustering = ENABLE_CLUSTERING,
823 .target_alloc = virtscsi_target_alloc,
824 .target_destroy = virtscsi_target_destroy,
825 .track_queue_depth = 1,
826};
827
828static struct scsi_host_template virtscsi_host_template_multi = {
829 .module = THIS_MODULE, 737 .module = THIS_MODULE,
830 .name = "Virtio SCSI HBA", 738 .name = "Virtio SCSI HBA",
831 .proc_name = "virtio_scsi", 739 .proc_name = "virtio_scsi",
832 .this_id = -1, 740 .this_id = -1,
833 .cmd_size = sizeof(struct virtio_scsi_cmd), 741 .cmd_size = sizeof(struct virtio_scsi_cmd),
834 .queuecommand = virtscsi_queuecommand_multi, 742 .queuecommand = virtscsi_queuecommand,
835 .change_queue_depth = virtscsi_change_queue_depth, 743 .change_queue_depth = virtscsi_change_queue_depth,
836 .eh_abort_handler = virtscsi_abort, 744 .eh_abort_handler = virtscsi_abort,
837 .eh_device_reset_handler = virtscsi_device_reset, 745 .eh_device_reset_handler = virtscsi_device_reset,
@@ -844,6 +752,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
844 .target_destroy = virtscsi_target_destroy, 752 .target_destroy = virtscsi_target_destroy,
845 .map_queues = virtscsi_map_queues, 753 .map_queues = virtscsi_map_queues,
846 .track_queue_depth = 1, 754 .track_queue_depth = 1,
755 .force_blk_mq = 1,
847}; 756};
848 757
849#define virtscsi_config_get(vdev, fld) \ 758#define virtscsi_config_get(vdev, fld) \
@@ -936,7 +845,6 @@ static int virtscsi_probe(struct virtio_device *vdev)
936 u32 sg_elems, num_targets; 845 u32 sg_elems, num_targets;
937 u32 cmd_per_lun; 846 u32 cmd_per_lun;
938 u32 num_queues; 847 u32 num_queues;
939 struct scsi_host_template *hostt;
940 848
941 if (!vdev->config->get) { 849 if (!vdev->config->get) {
942 dev_err(&vdev->dev, "%s failure: config access disabled\n", 850 dev_err(&vdev->dev, "%s failure: config access disabled\n",
@@ -949,12 +857,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
949 857
950 num_targets = virtscsi_config_get(vdev, max_target) + 1; 858 num_targets = virtscsi_config_get(vdev, max_target) + 1;
951 859
952 if (num_queues == 1) 860 shost = scsi_host_alloc(&virtscsi_host_template,
953 hostt = &virtscsi_host_template_single;
954 else
955 hostt = &virtscsi_host_template_multi;
956
957 shost = scsi_host_alloc(hostt,
958 sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues); 861 sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
959 if (!shost) 862 if (!shost)
960 return -ENOMEM; 863 return -ENOMEM;
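
With force_blk_mq set in the remaining virtio_scsi host template, every request goes through blk-mq, so the per-target atomic_t counter and seqcount steering that the removed virtscsi_pick_vq()/queuecommand_single paths needed can go away: the hardware queue is simply recovered from the command's blk-mq tag via blk_mq_unique_tag_to_hwq(). As a reference, here is a small userspace demo of that tag layout as I read the blk-mq helpers (hardware-queue index in the upper 16 bits, per-queue tag in the lower 16); the names and constants below are local stand-ins, not the kernel symbols.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define UNIQUE_TAG_BITS 16u
#define UNIQUE_TAG_MASK ((1u << UNIQUE_TAG_BITS) - 1)

/* Local stand-ins for the blk-mq unique-tag encode/decode helpers. */
static uint32_t make_unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << UNIQUE_TAG_BITS) | (tag & UNIQUE_TAG_MASK);
}

static uint16_t unique_tag_to_hwq(uint32_t unique_tag)
{
	return unique_tag >> UNIQUE_TAG_BITS;
}

int main(void)
{
	uint32_t t = make_unique_tag(3, 42);

	/* The driver only needs the hwq half to pick &vscsi->req_vqs[hwq]. */
	assert(unique_tag_to_hwq(t) == 3);
	printf("unique tag 0x%08x -> hwq %u, per-queue tag %u\n",
	       (unsigned)t, (unsigned)unique_tag_to_hwq(t),
	       (unsigned)(t & UNIQUE_TAG_MASK));
	return 0;
}
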
diff --git a/drivers/staging/ncpfs/ncplib_kernel.c b/drivers/staging/ncpfs/ncplib_kernel.c
index 804adfebba2f..3e047eb4cc7c 100644
--- a/drivers/staging/ncpfs/ncplib_kernel.c
+++ b/drivers/staging/ncpfs/ncplib_kernel.c
@@ -981,6 +981,10 @@ ncp_read_kernel(struct ncp_server *server, const char *file_id,
981 goto out; 981 goto out;
982 } 982 }
983 *bytes_read = ncp_reply_be16(server, 0); 983 *bytes_read = ncp_reply_be16(server, 0);
984 if (*bytes_read > to_read) {
985 result = -EINVAL;
986 goto out;
987 }
984 source = ncp_reply_data(server, 2 + (offset & 1)); 988 source = ncp_reply_data(server, 2 + (offset & 1));
985 989
986 memcpy(target, source, *bytes_read); 990 memcpy(target, source, *bytes_read);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 88b902c525d7..b4e57c5a8bba 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1727,7 +1727,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
1727 default_attr(vc); 1727 default_attr(vc);
1728 update_attr(vc); 1728 update_attr(vc);
1729 1729
1730 vc->vc_tab_stop[0] = 0x01010100; 1730 vc->vc_tab_stop[0] =
1731 vc->vc_tab_stop[1] = 1731 vc->vc_tab_stop[1] =
1732 vc->vc_tab_stop[2] = 1732 vc->vc_tab_stop[2] =
1733 vc->vc_tab_stop[3] = 1733 vc->vc_tab_stop[3] =
@@ -1771,7 +1771,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
1771 vc->vc_pos -= (vc->vc_x << 1); 1771 vc->vc_pos -= (vc->vc_x << 1);
1772 while (vc->vc_x < vc->vc_cols - 1) { 1772 while (vc->vc_x < vc->vc_cols - 1) {
1773 vc->vc_x++; 1773 vc->vc_x++;
1774 if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31))) 1774 if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31)))
1775 break; 1775 break;
1776 } 1776 }
1777 vc->vc_pos += (vc->vc_x << 1); 1777 vc->vc_pos += (vc->vc_x << 1);
@@ -1831,7 +1831,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
1831 lf(vc); 1831 lf(vc);
1832 return; 1832 return;
1833 case 'H': 1833 case 'H':
1834 vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31)); 1834 vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31));
1835 return; 1835 return;
1836 case 'Z': 1836 case 'Z':
1837 respond_ID(tty); 1837 respond_ID(tty);
@@ -2024,7 +2024,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
2024 return; 2024 return;
2025 case 'g': 2025 case 'g':
2026 if (!vc->vc_par[0]) 2026 if (!vc->vc_par[0])
2027 vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31)); 2027 vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31));
2028 else if (vc->vc_par[0] == 3) { 2028 else if (vc->vc_par[0] == 3) {
2029 vc->vc_tab_stop[0] = 2029 vc->vc_tab_stop[0] =
2030 vc->vc_tab_stop[1] = 2030 vc->vc_tab_stop[1] =
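
The vt.c hunks above mask the tab-stop word index with 7 & (vc->vc_x >> 5), so a cursor column beyond 255 can no longer index past the eight 32-bit words of vc_tab_stop, and reset_terminal() now folds vc_tab_stop[0] into the same chained assignment as the remaining words. A small userspace demo of the clamped bitmap indexing, with arbitrary column values:

#include <stdio.h>

static unsigned int tab_stop[8];	/* 8 x 32 bits = columns 0..255 */

static void set_tab(unsigned int col)
{
	tab_stop[7 & (col >> 5)] |= 1u << (col & 31);
}

static int is_tab(unsigned int col)
{
	return !!(tab_stop[7 & (col >> 5)] & (1u << (col & 31)));
}

int main(void)
{
	set_tab(8);
	set_tab(300);	/* out of range: wraps within the array instead of
			 * writing past the end of it */
	printf("col 8: %d, col 300: %d, col 9: %d\n",
	       is_tab(8), is_tab(300), is_tab(9));
	return 0;
}
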
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index b0f759476900..8a1508a8e481 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -207,9 +207,6 @@ static bool vfio_pci_nointx(struct pci_dev *pdev)
207 } 207 }
208 } 208 }
209 209
210 if (!pdev->irq)
211 return true;
212
213 return false; 210 return false;
214} 211}
215 212
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index a31d9b240af8..edc6fec9ad84 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -630,7 +630,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
630 630
631 if (!len && vq->busyloop_timeout) { 631 if (!len && vq->busyloop_timeout) {
632 /* Both tx vq and rx socket were polled here */ 632 /* Both tx vq and rx socket were polled here */
633 mutex_lock(&vq->mutex); 633 mutex_lock_nested(&vq->mutex, 1);
634 vhost_disable_notify(&net->dev, vq); 634 vhost_disable_notify(&net->dev, vq);
635 635
636 preempt_disable(); 636 preempt_disable();
@@ -763,7 +763,7 @@ static void handle_rx(struct vhost_net *net)
763 struct iov_iter fixup; 763 struct iov_iter fixup;
764 __virtio16 num_buffers; 764 __virtio16 num_buffers;
765 765
766 mutex_lock(&vq->mutex); 766 mutex_lock_nested(&vq->mutex, 0);
767 sock = vq->private_data; 767 sock = vq->private_data;
768 if (!sock) 768 if (!sock)
769 goto out; 769 goto out;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1b3e8d2d5c8b..5320039671b7 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -212,8 +212,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
212 if (mask) 212 if (mask)
213 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask)); 213 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
214 if (mask & EPOLLERR) { 214 if (mask & EPOLLERR) {
215 if (poll->wqh) 215 vhost_poll_stop(poll);
216 remove_wait_queue(poll->wqh, &poll->wait);
217 ret = -EINVAL; 216 ret = -EINVAL;
218 } 217 }
219 218
@@ -1245,14 +1244,12 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
1245/* Caller should have vq mutex and device mutex */ 1244/* Caller should have vq mutex and device mutex */
1246int vhost_vq_access_ok(struct vhost_virtqueue *vq) 1245int vhost_vq_access_ok(struct vhost_virtqueue *vq)
1247{ 1246{
1248 if (vq->iotlb) { 1247 int ret = vq_log_access_ok(vq, vq->log_base);
1249 /* When device IOTLB was used, the access validation 1248
1250 * will be validated during prefetching. 1249 if (ret || vq->iotlb)
1251 */ 1250 return ret;
1252 return 1; 1251
1253 } 1252 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
1254 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
1255 vq_log_access_ok(vq, vq->log_base);
1256} 1253}
1257EXPORT_SYMBOL_GPL(vhost_vq_access_ok); 1254EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
1258 1255
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 6639926eed4e..b67eec3532a1 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -640,7 +640,8 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
640struct ceph_aio_request { 640struct ceph_aio_request {
641 struct kiocb *iocb; 641 struct kiocb *iocb;
642 size_t total_len; 642 size_t total_len;
643 int write; 643 bool write;
644 bool should_dirty;
644 int error; 645 int error;
645 struct list_head osd_reqs; 646 struct list_head osd_reqs;
646 unsigned num_reqs; 647 unsigned num_reqs;
@@ -750,7 +751,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
750 } 751 }
751 } 752 }
752 753
753 ceph_put_page_vector(osd_data->pages, num_pages, !aio_req->write); 754 ceph_put_page_vector(osd_data->pages, num_pages, aio_req->should_dirty);
754 ceph_osdc_put_request(req); 755 ceph_osdc_put_request(req);
755 756
756 if (rc < 0) 757 if (rc < 0)
@@ -847,6 +848,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
847 size_t count = iov_iter_count(iter); 848 size_t count = iov_iter_count(iter);
848 loff_t pos = iocb->ki_pos; 849 loff_t pos = iocb->ki_pos;
849 bool write = iov_iter_rw(iter) == WRITE; 850 bool write = iov_iter_rw(iter) == WRITE;
851 bool should_dirty = !write && iter_is_iovec(iter);
850 852
851 if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP) 853 if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
852 return -EROFS; 854 return -EROFS;
@@ -914,6 +916,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
914 if (aio_req) { 916 if (aio_req) {
915 aio_req->iocb = iocb; 917 aio_req->iocb = iocb;
916 aio_req->write = write; 918 aio_req->write = write;
919 aio_req->should_dirty = should_dirty;
917 INIT_LIST_HEAD(&aio_req->osd_reqs); 920 INIT_LIST_HEAD(&aio_req->osd_reqs);
918 if (write) { 921 if (write) {
919 aio_req->mtime = mtime; 922 aio_req->mtime = mtime;
@@ -971,7 +974,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
971 len = ret; 974 len = ret;
972 } 975 }
973 976
974 ceph_put_page_vector(pages, num_pages, !write); 977 ceph_put_page_vector(pages, num_pages, should_dirty);
975 978
976 ceph_osdc_put_request(req); 979 ceph_osdc_put_request(req);
977 if (ret < 0) 980 if (ret < 0)
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index c332f0a45607..3fdfede2f0f3 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -734,11 +734,7 @@ struct fsl_ifc_nand {
734 u32 res19[0x10]; 734 u32 res19[0x10];
735 __be32 nand_fsr; 735 __be32 nand_fsr;
736 u32 res20; 736 u32 res20;
737 /* The V1 nand_eccstat is actually 4 words that overlaps the 737 __be32 nand_eccstat[8];
738 * V2 nand_eccstat.
739 */
740 __be32 v1_nand_eccstat[2];
741 __be32 v2_nand_eccstat[6];
742 u32 res21[0x1c]; 738 u32 res21[0x1c];
743 __be32 nanndcr; 739 __be32 nanndcr;
744 u32 res22[0x2]; 740 u32 res22[0x2];
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 24d1976c1e61..d11f41d5269f 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -347,13 +347,24 @@ static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
347 skb_push(skb, VLAN_HLEN); 347 skb_push(skb, VLAN_HLEN);
348 348
349 /* Move the mac header sans proto to the beginning of the new header. */ 349 /* Move the mac header sans proto to the beginning of the new header. */
350 memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN); 350 if (likely(mac_len > ETH_TLEN))
351 memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
351 skb->mac_header -= VLAN_HLEN; 352 skb->mac_header -= VLAN_HLEN;
352 353
353 veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN); 354 veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
354 355
355 /* first, the ethernet type */ 356 /* first, the ethernet type */
356 veth->h_vlan_proto = vlan_proto; 357 if (likely(mac_len >= ETH_TLEN)) {
358 /* h_vlan_encapsulated_proto should already be populated, and
359 * skb->data has space for h_vlan_proto
360 */
361 veth->h_vlan_proto = vlan_proto;
362 } else {
363 /* h_vlan_encapsulated_proto should not be populated, and
364 * skb->data has no space for h_vlan_proto
365 */
366 veth->h_vlan_encapsulated_proto = skb->protocol;
367 }
357 368
358 /* now, the TCI */ 369 /* now, the TCI */
359 veth->h_vlan_TCI = htons(vlan_tci); 370 veth->h_vlan_TCI = htons(vlan_tci);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 2168cc6b8b30..b46b541c67c4 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -151,7 +151,7 @@ extern struct jump_entry __start___jump_table[];
151extern struct jump_entry __stop___jump_table[]; 151extern struct jump_entry __stop___jump_table[];
152 152
153extern void jump_label_init(void); 153extern void jump_label_init(void);
154extern void jump_label_invalidate_init(void); 154extern void jump_label_invalidate_initmem(void);
155extern void jump_label_lock(void); 155extern void jump_label_lock(void);
156extern void jump_label_unlock(void); 156extern void jump_label_unlock(void);
157extern void arch_jump_label_transform(struct jump_entry *entry, 157extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -199,7 +199,7 @@ static __always_inline void jump_label_init(void)
199 static_key_initialized = true; 199 static_key_initialized = true;
200} 200}
201 201
202static inline void jump_label_invalidate_init(void) {} 202static inline void jump_label_invalidate_initmem(void) {}
203 203
204static __always_inline bool static_key_false(struct static_key *key) 204static __always_inline bool static_key_false(struct static_key *key)
205{ 205{
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
index bebeaad897cc..29ed8fd6379a 100644
--- a/include/linux/net_dim.h
+++ b/include/linux/net_dim.h
@@ -231,7 +231,7 @@ static inline void net_dim_exit_parking(struct net_dim *dim)
231} 231}
232 232
233#define IS_SIGNIFICANT_DIFF(val, ref) \ 233#define IS_SIGNIFICANT_DIFF(val, ref) \
234 (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ 234 (((100UL * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
235 235
236static inline int net_dim_stats_compare(struct net_dim_stats *curr, 236static inline int net_dim_stats_compare(struct net_dim_stats *curr,
237 struct net_dim_stats *prev) 237 struct net_dim_stats *prev)
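
The net_dim.h change widens the multiplication in IS_SIGNIFICANT_DIFF() from 100 to 100UL, evidently so that large per-sample deltas no longer wrap in 32-bit arithmetic before the divide. The userspace snippet below illustrates the difference on a typical LP64 build; the delta value is an arbitrary example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t diff = 3000000000u;       /* large byte/packet delta */
	uint32_t narrow = 100 * diff;      /* 32-bit multiply: wraps */
	unsigned long wide = 100UL * diff; /* promoted to 64 bits on LP64 */

	printf("100   * diff = %u\n", (unsigned)narrow);
	printf("100UL * diff = %lu\n", wide);
	return 0;
}
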
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index fe994d2e5286..5c40f118c0fa 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -103,7 +103,7 @@ void llc_sk_reset(struct sock *sk);
103 103
104/* Access to a connection */ 104/* Access to a connection */
105int llc_conn_state_process(struct sock *sk, struct sk_buff *skb); 105int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
106void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); 106int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
107void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb); 107void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
108void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit); 108void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
109void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit); 109void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index bd2a18d66189..cd368d1b8cb8 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1072,6 +1072,8 @@ struct nft_object_ops {
1072int nft_register_obj(struct nft_object_type *obj_type); 1072int nft_register_obj(struct nft_object_type *obj_type);
1073void nft_unregister_obj(struct nft_object_type *obj_type); 1073void nft_unregister_obj(struct nft_object_type *obj_type);
1074 1074
1075#define NFT_FLOWTABLE_DEVICE_MAX 8
1076
1075/** 1077/**
1076 * struct nft_flowtable - nf_tables flow table 1078 * struct nft_flowtable - nf_tables flow table
1077 * 1079 *
@@ -1084,6 +1086,7 @@ void nft_unregister_obj(struct nft_object_type *obj_type);
1084 * @genmask: generation mask 1086 * @genmask: generation mask
1085 * @use: number of references to this flow table 1087 * @use: number of references to this flow table
1086 * @handle: unique object handle 1088 * @handle: unique object handle
1089 * @dev_name: array of device names
1087 * @data: rhashtable and garbage collector 1090 * @data: rhashtable and garbage collector
1088 * @ops: array of hooks 1091 * @ops: array of hooks
1089 */ 1092 */
@@ -1097,6 +1100,7 @@ struct nft_flowtable {
1097 u32 genmask:2, 1100 u32 genmask:2,
1098 use:30; 1101 use:30;
1099 u64 handle; 1102 u64 handle;
1103 char *dev_name[NFT_FLOWTABLE_DEVICE_MAX];
1100 /* runtime data below here */ 1104 /* runtime data below here */
1101 struct nf_hook_ops *ops ____cacheline_aligned; 1105 struct nf_hook_ops *ops ____cacheline_aligned;
1102 struct nf_flowtable data; 1106 struct nf_flowtable data;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 493e311bbe93..5154c8300262 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -30,6 +30,7 @@ struct qdisc_rate_table {
30enum qdisc_state_t { 30enum qdisc_state_t {
31 __QDISC_STATE_SCHED, 31 __QDISC_STATE_SCHED,
32 __QDISC_STATE_DEACTIVATED, 32 __QDISC_STATE_DEACTIVATED,
33 __QDISC_STATE_RUNNING,
33}; 34};
34 35
35struct qdisc_size_table { 36struct qdisc_size_table {
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index d656809f1217..415e09960017 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -130,6 +130,8 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
130 const unsigned char *dst_dev_addr); 130 const unsigned char *dst_dev_addr);
131 131
132int rdma_addr_size(struct sockaddr *addr); 132int rdma_addr_size(struct sockaddr *addr);
133int rdma_addr_size_in6(struct sockaddr_in6 *addr);
134int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
133 135
134int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, 136int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
135 const union ib_gid *dgid, 137 const union ib_gid *dgid,
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index a8b7bf879ced..9c1e4bad6581 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -452,6 +452,9 @@ struct scsi_host_template {
452 /* True if the controller does not support WRITE SAME */ 452 /* True if the controller does not support WRITE SAME */
453 unsigned no_write_same:1; 453 unsigned no_write_same:1;
454 454
455 /* True if the low-level driver supports blk-mq only */
456 unsigned force_blk_mq:1;
457
455 /* 458 /*
456 * Countdown for host blocking with no commands outstanding. 459 * Countdown for host blocking with no commands outstanding.
457 */ 460 */
diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h
index 4fe580d36e41..f5bf06ecd87d 100644
--- a/include/uapi/linux/lirc.h
+++ b/include/uapi/linux/lirc.h
@@ -54,7 +54,6 @@
54#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW) 54#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW)
55#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE) 55#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE)
56#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2) 56#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2)
57#define LIRC_CAN_SEND_SCANCODE LIRC_MODE2SEND(LIRC_MODE_SCANCODE)
58#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE) 57#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE)
59 58
60#define LIRC_CAN_SEND_MASK 0x0000003f 59#define LIRC_CAN_SEND_MASK 0x0000003f
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index 17a022c5b414..da3315ed1bcd 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -370,7 +370,7 @@ static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_d
370{ 370{
371 return (protocol == UAC_VERSION_1) ? 371 return (protocol == UAC_VERSION_1) ?
372 desc->baSourceID[desc->bNrInPins + 4] : 372 desc->baSourceID[desc->bNrInPins + 4] :
373 desc->baSourceID[desc->bNrInPins + 6]; 373 2; /* in UAC2, this value is constant */
374} 374}
375 375
376static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc, 376static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc,
@@ -378,7 +378,7 @@ static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_de
378{ 378{
379 return (protocol == UAC_VERSION_1) ? 379 return (protocol == UAC_VERSION_1) ?
380 &desc->baSourceID[desc->bNrInPins + 5] : 380 &desc->baSourceID[desc->bNrInPins + 5] :
381 &desc->baSourceID[desc->bNrInPins + 7]; 381 &desc->baSourceID[desc->bNrInPins + 6];
382} 382}
383 383
384static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc, 384static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc,
diff --git a/init/main.c b/init/main.c
index 969eaf140ef0..21efbf6ace93 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1001,7 +1001,7 @@ static int __ref kernel_init(void *unused)
1001 /* need to finish all async __init code before freeing the memory */ 1001 /* need to finish all async __init code before freeing the memory */
1002 async_synchronize_full(); 1002 async_synchronize_full();
1003 ftrace_free_init_mem(); 1003 ftrace_free_init_mem();
1004 jump_label_invalidate_init(); 1004 jump_label_invalidate_initmem();
1005 free_initmem(); 1005 free_initmem();
1006 mark_readonly(); 1006 mark_readonly();
1007 system_state = SYSTEM_RUNNING; 1007 system_state = SYSTEM_RUNNING;
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index d7f309f74dec..a808f29d4c5a 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -325,9 +325,8 @@ err:
325static int mqueue_fill_super(struct super_block *sb, void *data, int silent) 325static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
326{ 326{
327 struct inode *inode; 327 struct inode *inode;
328 struct ipc_namespace *ns = data; 328 struct ipc_namespace *ns = sb->s_fs_info;
329 329
330 sb->s_fs_info = ns;
331 sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV; 330 sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
332 sb->s_blocksize = PAGE_SIZE; 331 sb->s_blocksize = PAGE_SIZE;
333 sb->s_blocksize_bits = PAGE_SHIFT; 332 sb->s_blocksize_bits = PAGE_SHIFT;
@@ -344,44 +343,18 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
344 return 0; 343 return 0;
345} 344}
346 345
347static struct file_system_type mqueue_fs_type;
348/*
349 * Return value is pinned only by reference in ->mq_mnt; it will
350 * live until ipcns dies. Caller does not need to drop it.
351 */
352static struct vfsmount *mq_internal_mount(void)
353{
354 struct ipc_namespace *ns = current->nsproxy->ipc_ns;
355 struct vfsmount *m = ns->mq_mnt;
356 if (m)
357 return m;
358 m = kern_mount_data(&mqueue_fs_type, ns);
359 spin_lock(&mq_lock);
360 if (unlikely(ns->mq_mnt)) {
361 spin_unlock(&mq_lock);
362 if (!IS_ERR(m))
363 kern_unmount(m);
364 return ns->mq_mnt;
365 }
366 if (!IS_ERR(m))
367 ns->mq_mnt = m;
368 spin_unlock(&mq_lock);
369 return m;
370}
371
372static struct dentry *mqueue_mount(struct file_system_type *fs_type, 346static struct dentry *mqueue_mount(struct file_system_type *fs_type,
373 int flags, const char *dev_name, 347 int flags, const char *dev_name,
374 void *data) 348 void *data)
375{ 349{
376 struct vfsmount *m; 350 struct ipc_namespace *ns;
377 if (flags & SB_KERNMOUNT) 351 if (flags & SB_KERNMOUNT) {
378 return mount_nodev(fs_type, flags, data, mqueue_fill_super); 352 ns = data;
379 m = mq_internal_mount(); 353 data = NULL;
380 if (IS_ERR(m)) 354 } else {
381 return ERR_CAST(m); 355 ns = current->nsproxy->ipc_ns;
382 atomic_inc(&m->mnt_sb->s_active); 356 }
383 down_write(&m->mnt_sb->s_umount); 357 return mount_ns(fs_type, flags, data, ns, ns->user_ns, mqueue_fill_super);
384 return dget(m->mnt_root);
385} 358}
386 359
387static void init_once(void *foo) 360static void init_once(void *foo)
@@ -771,16 +744,13 @@ static int prepare_open(struct dentry *dentry, int oflag, int ro,
771static int do_mq_open(const char __user *u_name, int oflag, umode_t mode, 744static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
772 struct mq_attr *attr) 745 struct mq_attr *attr)
773{ 746{
774 struct vfsmount *mnt = mq_internal_mount(); 747 struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
775 struct dentry *root; 748 struct dentry *root = mnt->mnt_root;
776 struct filename *name; 749 struct filename *name;
777 struct path path; 750 struct path path;
778 int fd, error; 751 int fd, error;
779 int ro; 752 int ro;
780 753
781 if (IS_ERR(mnt))
782 return PTR_ERR(mnt);
783
784 audit_mq_open(oflag, mode, attr); 754 audit_mq_open(oflag, mode, attr);
785 755
786 if (IS_ERR(name = getname(u_name))) 756 if (IS_ERR(name = getname(u_name)))
@@ -791,7 +761,6 @@ static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
791 goto out_putname; 761 goto out_putname;
792 762
793 ro = mnt_want_write(mnt); /* we'll drop it in any case */ 763 ro = mnt_want_write(mnt); /* we'll drop it in any case */
794 root = mnt->mnt_root;
795 inode_lock(d_inode(root)); 764 inode_lock(d_inode(root));
796 path.dentry = lookup_one_len(name->name, root, strlen(name->name)); 765 path.dentry = lookup_one_len(name->name, root, strlen(name->name));
797 if (IS_ERR(path.dentry)) { 766 if (IS_ERR(path.dentry)) {
@@ -840,9 +809,6 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
840 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; 809 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
841 struct vfsmount *mnt = ipc_ns->mq_mnt; 810 struct vfsmount *mnt = ipc_ns->mq_mnt;
842 811
843 if (!mnt)
844 return -ENOENT;
845
846 name = getname(u_name); 812 name = getname(u_name);
847 if (IS_ERR(name)) 813 if (IS_ERR(name))
848 return PTR_ERR(name); 814 return PTR_ERR(name);
@@ -1569,26 +1535,28 @@ int mq_init_ns(struct ipc_namespace *ns)
1569 ns->mq_msgsize_max = DFLT_MSGSIZEMAX; 1535 ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
1570 ns->mq_msg_default = DFLT_MSG; 1536 ns->mq_msg_default = DFLT_MSG;
1571 ns->mq_msgsize_default = DFLT_MSGSIZE; 1537 ns->mq_msgsize_default = DFLT_MSGSIZE;
1572 ns->mq_mnt = NULL;
1573 1538
1539 ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
1540 if (IS_ERR(ns->mq_mnt)) {
1541 int err = PTR_ERR(ns->mq_mnt);
1542 ns->mq_mnt = NULL;
1543 return err;
1544 }
1574 return 0; 1545 return 0;
1575} 1546}
1576 1547
1577void mq_clear_sbinfo(struct ipc_namespace *ns) 1548void mq_clear_sbinfo(struct ipc_namespace *ns)
1578{ 1549{
1579 if (ns->mq_mnt) 1550 ns->mq_mnt->mnt_sb->s_fs_info = NULL;
1580 ns->mq_mnt->mnt_sb->s_fs_info = NULL;
1581} 1551}
1582 1552
1583void mq_put_mnt(struct ipc_namespace *ns) 1553void mq_put_mnt(struct ipc_namespace *ns)
1584{ 1554{
1585 if (ns->mq_mnt) 1555 kern_unmount(ns->mq_mnt);
1586 kern_unmount(ns->mq_mnt);
1587} 1556}
1588 1557
1589static int __init init_mqueue_fs(void) 1558static int __init init_mqueue_fs(void)
1590{ 1559{
1591 struct vfsmount *m;
1592 int error; 1560 int error;
1593 1561
1594 mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache", 1562 mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
@@ -1610,10 +1578,6 @@ static int __init init_mqueue_fs(void)
1610 if (error) 1578 if (error)
1611 goto out_filesystem; 1579 goto out_filesystem;
1612 1580
1613 m = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
1614 if (IS_ERR(m))
1615 goto out_filesystem;
1616 init_ipc_ns.mq_mnt = m;
1617 return 0; 1581 return 0;
1618 1582
1619out_filesystem: 1583out_filesystem:
diff --git a/ipc/shm.c b/ipc/shm.c
index 4643865e9171..93e0e3a4d009 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -386,6 +386,17 @@ static int shm_fault(struct vm_fault *vmf)
386 return sfd->vm_ops->fault(vmf); 386 return sfd->vm_ops->fault(vmf);
387} 387}
388 388
389static int shm_split(struct vm_area_struct *vma, unsigned long addr)
390{
391 struct file *file = vma->vm_file;
392 struct shm_file_data *sfd = shm_file_data(file);
393
394 if (sfd->vm_ops && sfd->vm_ops->split)
395 return sfd->vm_ops->split(vma, addr);
396
397 return 0;
398}
399
389#ifdef CONFIG_NUMA 400#ifdef CONFIG_NUMA
390static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new) 401static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
391{ 402{
@@ -510,6 +521,7 @@ static const struct vm_operations_struct shm_vm_ops = {
510 .open = shm_open, /* callback for a new vm-area open */ 521 .open = shm_open, /* callback for a new vm-area open */
511 .close = shm_close, /* callback for when the vm-area is released */ 522 .close = shm_close, /* callback for when the vm-area is released */
512 .fault = shm_fault, 523 .fault = shm_fault,
524 .split = shm_split,
513#if defined(CONFIG_NUMA) 525#if defined(CONFIG_NUMA)
514 .set_policy = shm_set_policy, 526 .set_policy = shm_set_policy,
515 .get_policy = shm_get_policy, 527 .get_policy = shm_get_policy,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4b838470fac4..709a55b9ad97 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -724,9 +724,15 @@ static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
724 724
725static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) 725static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
726{ 726{
727 struct perf_cgroup *cgrp_out = cpuctx->cgrp; 727 struct perf_cgroup *cgrp = cpuctx->cgrp;
728 if (cgrp_out) 728 struct cgroup_subsys_state *css;
729 __update_cgrp_time(cgrp_out); 729
730 if (cgrp) {
731 for (css = &cgrp->css; css; css = css->parent) {
732 cgrp = container_of(css, struct perf_cgroup, css);
733 __update_cgrp_time(cgrp);
734 }
735 }
730} 736}
731 737
732static inline void update_cgrp_time_from_event(struct perf_event *event) 738static inline void update_cgrp_time_from_event(struct perf_event *event)
@@ -754,6 +760,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
754{ 760{
755 struct perf_cgroup *cgrp; 761 struct perf_cgroup *cgrp;
756 struct perf_cgroup_info *info; 762 struct perf_cgroup_info *info;
763 struct cgroup_subsys_state *css;
757 764
758 /* 765 /*
759 * ctx->lock held by caller 766 * ctx->lock held by caller
@@ -764,8 +771,12 @@ perf_cgroup_set_timestamp(struct task_struct *task,
764 return; 771 return;
765 772
766 cgrp = perf_cgroup_from_task(task, ctx); 773 cgrp = perf_cgroup_from_task(task, ctx);
767 info = this_cpu_ptr(cgrp->info); 774
768 info->timestamp = ctx->timestamp; 775 for (css = &cgrp->css; css; css = css->parent) {
776 cgrp = container_of(css, struct perf_cgroup, css);
777 info = this_cpu_ptr(cgrp->info);
778 info->timestamp = ctx->timestamp;
779 }
769} 780}
770 781
771static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list); 782static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
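The perf/core hunks change the cgroup time accounting to walk css->parent and update every ancestor cgroup, not only the task's own cgroup, so events attached higher in the hierarchy see consistent timestamps. The pattern, reduced to a hedged standalone sketch with hypothetical names (struct node is not a kernel type):

struct node {
	struct node *parent;
	unsigned long long timestamp;
};

/* propagate the new timestamp from the leaf up through every ancestor */
static void set_timestamp_hierarchy(struct node *n, unsigned long long now)
{
	for (; n; n = n->parent)
		n->timestamp = now;
}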
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index e7214093dcd1..01ebdf1f9f40 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -16,6 +16,7 @@
16#include <linux/jump_label_ratelimit.h> 16#include <linux/jump_label_ratelimit.h>
17#include <linux/bug.h> 17#include <linux/bug.h>
18#include <linux/cpu.h> 18#include <linux/cpu.h>
19#include <asm/sections.h>
19 20
20#ifdef HAVE_JUMP_LABEL 21#ifdef HAVE_JUMP_LABEL
21 22
@@ -421,15 +422,15 @@ void __init jump_label_init(void)
421 cpus_read_unlock(); 422 cpus_read_unlock();
422} 423}
423 424
424/* Disable any jump label entries in __init code */ 425/* Disable any jump label entries in __init/__exit code */
425void __init jump_label_invalidate_init(void) 426void __init jump_label_invalidate_initmem(void)
426{ 427{
427 struct jump_entry *iter_start = __start___jump_table; 428 struct jump_entry *iter_start = __start___jump_table;
428 struct jump_entry *iter_stop = __stop___jump_table; 429 struct jump_entry *iter_stop = __stop___jump_table;
429 struct jump_entry *iter; 430 struct jump_entry *iter;
430 431
431 for (iter = iter_start; iter < iter_stop; iter++) { 432 for (iter = iter_start; iter < iter_stop; iter++) {
432 if (init_kernel_text(iter->code)) 433 if (init_section_contains((void *)(unsigned long)iter->code, 1))
433 iter->code = 0; 434 iter->code = 0;
434 } 435 }
435} 436}
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 858a07590e39..2048359f33d2 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -1082,15 +1082,16 @@ static noinline int __sched
1082__mutex_lock_interruptible_slowpath(struct mutex *lock); 1082__mutex_lock_interruptible_slowpath(struct mutex *lock);
1083 1083
1084/** 1084/**
1085 * mutex_lock_interruptible - acquire the mutex, interruptible 1085 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
1086 * @lock: the mutex to be acquired 1086 * @lock: The mutex to be acquired.
1087 * 1087 *
1088 * Lock the mutex like mutex_lock(), and return 0 if the mutex has 1088 * Lock the mutex like mutex_lock(). If a signal is delivered while the
1089 * been acquired or sleep until the mutex becomes available. If a 1089 * process is sleeping, this function will return without acquiring the
1090 * signal arrives while waiting for the lock then this function 1090 * mutex.
1091 * returns -EINTR.
1092 * 1091 *
1093 * This function is similar to (but not equivalent to) down_interruptible(). 1092 * Context: Process context.
1093 * Return: 0 if the lock was successfully acquired or %-EINTR if a
1094 * signal arrived.
1094 */ 1095 */
1095int __sched mutex_lock_interruptible(struct mutex *lock) 1096int __sched mutex_lock_interruptible(struct mutex *lock)
1096{ 1097{
@@ -1104,6 +1105,18 @@ int __sched mutex_lock_interruptible(struct mutex *lock)
1104 1105
1105EXPORT_SYMBOL(mutex_lock_interruptible); 1106EXPORT_SYMBOL(mutex_lock_interruptible);
1106 1107
1108/**
1109 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
1110 * @lock: The mutex to be acquired.
1111 *
1112 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
1113 * the current process is delivered while the process is sleeping, this
1114 * function will return without acquiring the mutex.
1115 *
1116 * Context: Process context.
1117 * Return: 0 if the lock was successfully acquired or %-EINTR if a
1118 * fatal signal arrived.
1119 */
1107int __sched mutex_lock_killable(struct mutex *lock) 1120int __sched mutex_lock_killable(struct mutex *lock)
1108{ 1121{
1109 might_sleep(); 1122 might_sleep();
@@ -1115,6 +1128,16 @@ int __sched mutex_lock_killable(struct mutex *lock)
1115} 1128}
1116EXPORT_SYMBOL(mutex_lock_killable); 1129EXPORT_SYMBOL(mutex_lock_killable);
1117 1130
1131/**
1132 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1133 * @lock: The mutex to be acquired.
1134 *
1135 * Lock the mutex like mutex_lock(). While the task is waiting for this
1136 * mutex, it will be accounted as being in the IO wait state by the
1137 * scheduler.
1138 *
1139 * Context: Process context.
1140 */
1118void __sched mutex_lock_io(struct mutex *lock) 1141void __sched mutex_lock_io(struct mutex *lock)
1119{ 1142{
1120 int token; 1143 int token;
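The mutex.c hunks convert the locking kerneldoc to the Context:/Return: style and add documentation for mutex_lock_killable() and mutex_lock_io(). A short usage sketch matching the documented return convention (my_lock and my_do_work are hypothetical, not from this patch):

#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);

static int my_do_work(void)
{
	int ret;

	ret = mutex_lock_interruptible(&my_lock);
	if (ret)
		return ret;	/* -EINTR: a signal arrived while sleeping */

	/* ... critical section ... */

	mutex_unlock(&my_lock);
	return 0;
}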
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 1ca0130ed4f9..72c401b3b15c 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -32,7 +32,7 @@ static DEFINE_SPINLOCK(sched_debug_lock);
32 if (m) \ 32 if (m) \
33 seq_printf(m, x); \ 33 seq_printf(m, x); \
34 else \ 34 else \
35 printk(x); \ 35 pr_cont(x); \
36 } while (0) 36 } while (0)
37 37
38/* 38/*
@@ -501,12 +501,12 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
501{ 501{
502 struct task_struct *g, *p; 502 struct task_struct *g, *p;
503 503
504 SEQ_printf(m, 504 SEQ_printf(m, "\n");
505 "\nrunnable tasks:\n" 505 SEQ_printf(m, "runnable tasks:\n");
506 " S task PID tree-key switches prio" 506 SEQ_printf(m, " S task PID tree-key switches prio"
507 " wait-time sum-exec sum-sleep\n" 507 " wait-time sum-exec sum-sleep\n");
508 "-------------------------------------------------------" 508 SEQ_printf(m, "-------------------------------------------------------"
509 "----------------------------------------------------\n"); 509 "----------------------------------------------------\n");
510 510
511 rcu_read_lock(); 511 rcu_read_lock();
512 for_each_process_thread(g, p) { 512 for_each_process_thread(g, p) {
@@ -527,9 +527,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
527 unsigned long flags; 527 unsigned long flags;
528 528
529#ifdef CONFIG_FAIR_GROUP_SCHED 529#ifdef CONFIG_FAIR_GROUP_SCHED
530 SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg)); 530 SEQ_printf(m, "\n");
531 SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
531#else 532#else
532 SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu); 533 SEQ_printf(m, "\n");
534 SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
533#endif 535#endif
534 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", 536 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
535 SPLIT_NS(cfs_rq->exec_clock)); 537 SPLIT_NS(cfs_rq->exec_clock));
@@ -595,9 +597,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
595void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) 597void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
596{ 598{
597#ifdef CONFIG_RT_GROUP_SCHED 599#ifdef CONFIG_RT_GROUP_SCHED
598 SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg)); 600 SEQ_printf(m, "\n");
601 SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
599#else 602#else
600 SEQ_printf(m, "\nrt_rq[%d]:\n", cpu); 603 SEQ_printf(m, "\n");
604 SEQ_printf(m, "rt_rq[%d]:\n", cpu);
601#endif 605#endif
602 606
603#define P(x) \ 607#define P(x) \
@@ -624,7 +628,8 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
624{ 628{
625 struct dl_bw *dl_bw; 629 struct dl_bw *dl_bw;
626 630
627 SEQ_printf(m, "\ndl_rq[%d]:\n", cpu); 631 SEQ_printf(m, "\n");
632 SEQ_printf(m, "dl_rq[%d]:\n", cpu);
628 633
629#define PU(x) \ 634#define PU(x) \
630 SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x)) 635 SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 75043046914e..10b7186d0638 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -50,6 +50,7 @@
50#include <linux/export.h> 50#include <linux/export.h>
51#include <linux/hashtable.h> 51#include <linux/hashtable.h>
52#include <linux/compat.h> 52#include <linux/compat.h>
53#include <linux/nospec.h>
53 54
54#include "timekeeping.h" 55#include "timekeeping.h"
55#include "posix-timers.h" 56#include "posix-timers.h"
@@ -1346,11 +1347,15 @@ static const struct k_clock * const posix_clocks[] = {
1346 1347
1347static const struct k_clock *clockid_to_kclock(const clockid_t id) 1348static const struct k_clock *clockid_to_kclock(const clockid_t id)
1348{ 1349{
1349 if (id < 0) 1350 clockid_t idx = id;
1351
1352 if (id < 0) {
1350 return (id & CLOCKFD_MASK) == CLOCKFD ? 1353 return (id & CLOCKFD_MASK) == CLOCKFD ?
1351 &clock_posix_dynamic : &clock_posix_cpu; 1354 &clock_posix_dynamic : &clock_posix_cpu;
1355 }
1352 1356
1353 if (id >= ARRAY_SIZE(posix_clocks) || !posix_clocks[id]) 1357 if (id >= ARRAY_SIZE(posix_clocks))
1354 return NULL; 1358 return NULL;
1355 return posix_clocks[id]; 1359
1360 return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))];
1356} 1361}
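The posix-timers hunk closes a Spectre-v1 gadget: clockid_to_kclock() now performs the architectural bounds check first and then sanitizes the index with array_index_nospec() before dereferencing posix_clocks[]. The same pattern in isolation, as a hedged sketch with a hypothetical table:

#include <linux/kernel.h>
#include <linux/nospec.h>

static const struct k_clock *table[16];	/* hypothetical lookup table */

static const struct k_clock *lookup(unsigned int id)
{
	if (id >= ARRAY_SIZE(table))
		return NULL;				/* architectural bounds check */
	id = array_index_nospec(id, ARRAY_SIZE(table));	/* clamp under speculation */
	return table[id];
}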
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 1fad24acd444..ae4147eaebd4 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -659,7 +659,7 @@ static int create_trace_kprobe(int argc, char **argv)
659 char *symbol = NULL, *event = NULL, *group = NULL; 659 char *symbol = NULL, *event = NULL, *group = NULL;
660 int maxactive = 0; 660 int maxactive = 0;
661 char *arg; 661 char *arg;
662 unsigned long offset = 0; 662 long offset = 0;
663 void *addr = NULL; 663 void *addr = NULL;
664 char buf[MAX_EVENT_NAME_LEN]; 664 char buf[MAX_EVENT_NAME_LEN];
665 665
@@ -747,7 +747,7 @@ static int create_trace_kprobe(int argc, char **argv)
747 symbol = argv[1]; 747 symbol = argv[1];
748 /* TODO: support .init module functions */ 748 /* TODO: support .init module functions */
749 ret = traceprobe_split_symbol_offset(symbol, &offset); 749 ret = traceprobe_split_symbol_offset(symbol, &offset);
750 if (ret) { 750 if (ret || offset < 0 || offset > UINT_MAX) {
751 pr_info("Failed to parse either an address or a symbol.\n"); 751 pr_info("Failed to parse either an address or a symbol.\n");
752 return ret; 752 return ret;
753 } 753 }
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index d59357308677..daf54bda4dc8 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -320,7 +320,7 @@ static fetch_func_t get_fetch_size_function(const struct fetch_type *type,
320} 320}
321 321
322/* Split symbol and offset. */ 322/* Split symbol and offset. */
323int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset) 323int traceprobe_split_symbol_offset(char *symbol, long *offset)
324{ 324{
325 char *tmp; 325 char *tmp;
326 int ret; 326 int ret;
@@ -328,13 +328,11 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
328 if (!offset) 328 if (!offset)
329 return -EINVAL; 329 return -EINVAL;
330 330
331 tmp = strchr(symbol, '+'); 331 tmp = strpbrk(symbol, "+-");
332 if (tmp) { 332 if (tmp) {
333 /* skip sign because kstrtoul doesn't accept '+' */ 333 ret = kstrtol(tmp, 0, offset);
334 ret = kstrtoul(tmp + 1, 0, offset);
335 if (ret) 334 if (ret)
336 return ret; 335 return ret;
337
338 *tmp = '\0'; 336 *tmp = '\0';
339 } else 337 } else
340 *offset = 0; 338 *offset = 0;
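The trace_probe hunks widen the probe offset to a signed long and parse it with strpbrk()/kstrtol(), so the shared parser now accepts a leading '-' as well as '+'; create_trace_kprobe() then rejects results outside 0..UINT_MAX as shown above. A runnable userspace sketch of the same parsing idea (illustrative, standard C rather than kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* split "symbol+off" or "symbol-off" in place; returns the signed offset */
static long split_symbol_offset(char *symbol)
{
	char *sign = strpbrk(symbol, "+-");
	long offset = 0;

	if (sign) {
		offset = strtol(sign, NULL, 0);	/* strtol consumes the sign */
		*sign = '\0';			/* terminate the symbol name */
	}
	return offset;
}

int main(void)
{
	char buf[] = "schedule-16";
	long off = split_symbol_offset(buf);

	printf("%s %+ld\n", buf, off);		/* prints: schedule -16 */
	return 0;
}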
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index e101c5bb9eda..6a4d3fa94042 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -365,7 +365,7 @@ extern int traceprobe_conflict_field_name(const char *name,
365extern void traceprobe_update_arg(struct probe_arg *arg); 365extern void traceprobe_update_arg(struct probe_arg *arg);
366extern void traceprobe_free_probe_arg(struct probe_arg *arg); 366extern void traceprobe_free_probe_arg(struct probe_arg *arg);
367 367
368extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset); 368extern int traceprobe_split_symbol_offset(char *symbol, long *offset);
369 369
370/* Sum up total data length for dynamic arraies (strings) */ 370/* Sum up total data length for dynamic arraies (strings) */
371static nokprobe_inline int 371static nokprobe_inline int
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index e83987c55a08..46c2290a08f1 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1657,8 +1657,7 @@ static void start_scan_thread(void)
1657} 1657}
1658 1658
1659/* 1659/*
1660 * Stop the automatic memory scanning thread. This function must be called 1660 * Stop the automatic memory scanning thread.
1661 * with the scan_mutex held.
1662 */ 1661 */
1663static void stop_scan_thread(void) 1662static void stop_scan_thread(void)
1664{ 1663{
@@ -1921,12 +1920,15 @@ static void kmemleak_do_cleanup(struct work_struct *work)
1921{ 1920{
1922 stop_scan_thread(); 1921 stop_scan_thread();
1923 1922
1923 mutex_lock(&scan_mutex);
1924 /* 1924 /*
1925 * Once the scan thread has stopped, it is safe to no longer track 1925 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
1926 * object freeing. Ordering of the scan thread stopping and the memory 1926 * longer track object freeing. Ordering of the scan thread stopping and
1927 * accesses below is guaranteed by the kthread_stop() function. 1927 * the memory accesses below is guaranteed by the kthread_stop()
1928 * function.
1928 */ 1929 */
1929 kmemleak_free_enabled = 0; 1930 kmemleak_free_enabled = 0;
1931 mutex_unlock(&scan_mutex);
1930 1932
1931 if (!kmemleak_found_leaks) 1933 if (!kmemleak_found_leaks)
1932 __kmemleak_do_cleanup(); 1934 __kmemleak_do_cleanup();
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 670e99b68aa6..9ec024b862ac 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -714,9 +714,9 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
714 * invocations for reference counting, or use mem_cgroup_iter_break() 714 * invocations for reference counting, or use mem_cgroup_iter_break()
715 * to cancel a hierarchy walk before the round-trip is complete. 715 * to cancel a hierarchy walk before the round-trip is complete.
716 * 716 *
717 * Reclaimers can specify a zone and a priority level in @reclaim to 717 * Reclaimers can specify a node and a priority level in @reclaim to
718 * divide up the memcgs in the hierarchy among all concurrent 718 * divide up the memcgs in the hierarchy among all concurrent
719 * reclaimers operating on the same zone and priority. 719 * reclaimers operating on the same node and priority.
720 */ 720 */
721struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 721struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
722 struct mem_cgroup *prev, 722 struct mem_cgroup *prev,
@@ -2299,7 +2299,7 @@ void memcg_kmem_put_cache(struct kmem_cache *cachep)
2299} 2299}
2300 2300
2301/** 2301/**
2302 * memcg_kmem_charge: charge a kmem page 2302 * memcg_kmem_charge_memcg: charge a kmem page
2303 * @page: page to charge 2303 * @page: page to charge
2304 * @gfp: reclaim mode 2304 * @gfp: reclaim mode
2305 * @order: allocation order 2305 * @order: allocation order
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 9886c6073828..7172e0a80e13 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -123,13 +123,13 @@ void __reset_page_owner(struct page *page, unsigned int order)
123static inline bool check_recursive_alloc(struct stack_trace *trace, 123static inline bool check_recursive_alloc(struct stack_trace *trace,
124 unsigned long ip) 124 unsigned long ip)
125{ 125{
126 int i, count; 126 int i;
127 127
128 if (!trace->nr_entries) 128 if (!trace->nr_entries)
129 return false; 129 return false;
130 130
131 for (i = 0, count = 0; i < trace->nr_entries; i++) { 131 for (i = 0; i < trace->nr_entries; i++) {
132 if (trace->entries[i] == ip && ++count == 2) 132 if (trace->entries[i] == ip)
133 return true; 133 return true;
134 } 134 }
135 135
diff --git a/mm/slab.c b/mm/slab.c
index 324446621b3e..9095c3945425 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1283,6 +1283,7 @@ void __init kmem_cache_init(void)
1283 nr_node_ids * sizeof(struct kmem_cache_node *), 1283 nr_node_ids * sizeof(struct kmem_cache_node *),
1284 SLAB_HWCACHE_ALIGN, 0, 0); 1284 SLAB_HWCACHE_ALIGN, 0, 0);
1285 list_add(&kmem_cache->list, &slab_caches); 1285 list_add(&kmem_cache->list, &slab_caches);
1286 memcg_link_cache(kmem_cache);
1286 slab_state = PARTIAL; 1287 slab_state = PARTIAL;
1287 1288
1288 /* 1289 /*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 40b2db6db6b1..33581be705f0 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1839,9 +1839,11 @@ static void vmstat_update(struct work_struct *w)
1839 * to occur in the future. Keep on running the 1839 * to occur in the future. Keep on running the
1840 * update worker thread. 1840 * update worker thread.
1841 */ 1841 */
1842 preempt_disable();
1842 queue_delayed_work_on(smp_processor_id(), mm_percpu_wq, 1843 queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
1843 this_cpu_ptr(&vmstat_work), 1844 this_cpu_ptr(&vmstat_work),
1844 round_jiffies_relative(sysctl_stat_interval)); 1845 round_jiffies_relative(sysctl_stat_interval));
1846 preempt_enable();
1845 } 1847 }
1846} 1848}
1847 1849
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index c294f6fd43e0..8b198ee798c9 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -746,7 +746,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
746{ 746{
747 struct batadv_neigh_node *neigh_curr = NULL; 747 struct batadv_neigh_node *neigh_curr = NULL;
748 struct batadv_neigh_node *neigh_old = NULL; 748 struct batadv_neigh_node *neigh_old = NULL;
749 struct batadv_orig_node *orig_dst_node; 749 struct batadv_orig_node *orig_dst_node = NULL;
750 struct batadv_gw_node *gw_node = NULL; 750 struct batadv_gw_node *gw_node = NULL;
751 struct batadv_gw_node *curr_gw = NULL; 751 struct batadv_gw_node *curr_gw = NULL;
752 struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo; 752 struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo;
@@ -757,6 +757,9 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
757 757
758 vid = batadv_get_vid(skb, 0); 758 vid = batadv_get_vid(skb, 0);
759 759
760 if (is_multicast_ether_addr(ethhdr->h_dest))
761 goto out;
762
760 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, 763 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
761 ethhdr->h_dest, vid); 764 ethhdr->h_dest, vid);
762 if (!orig_dst_node) 765 if (!orig_dst_node)
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index de3a055f7dd8..a11d3d89f012 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -869,8 +869,8 @@ static struct batadv_orig_node *
869batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv, 869batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
870 struct ethhdr *ethhdr) 870 struct ethhdr *ethhdr)
871{ 871{
872 return batadv_transtable_search(bat_priv, ethhdr->h_source, 872 return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
873 ethhdr->h_dest, BATADV_NO_FLAGS); 873 BATADV_NO_FLAGS);
874} 874}
875 875
876/** 876/**
diff --git a/net/core/dev.c b/net/core/dev.c
index 8edb58829124..9b04a9fd1dfd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2762,7 +2762,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2762 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 2762 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2763 return 0; 2763 return 0;
2764 2764
2765 eth = (struct ethhdr *)skb_mac_header(skb); 2765 eth = (struct ethhdr *)skb->data;
2766 type = eth->h_proto; 2766 type = eth->h_proto;
2767 } 2767 }
2768 2768
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b5c75d4fcf37..1bca1e0fc8f7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5047,8 +5047,10 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5047 } 5047 }
5048 5048
5049 mac_len = skb->data - skb_mac_header(skb); 5049 mac_len = skb->data - skb_mac_header(skb);
5050 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 5050 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
5051 mac_len - VLAN_HLEN - ETH_TLEN); 5051 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5052 mac_len - VLAN_HLEN - ETH_TLEN);
5053 }
5052 skb->mac_header += VLAN_HLEN; 5054 skb->mac_header += VLAN_HLEN;
5053 return skb; 5055 return skb;
5054} 5056}
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 5fcb17cb426b..de6d94482fe7 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -346,12 +346,17 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
346 struct ip_tunnel *nt; 346 struct ip_tunnel *nt;
347 struct net_device *dev; 347 struct net_device *dev;
348 int t_hlen; 348 int t_hlen;
349 int mtu;
350 int err;
349 351
350 dev = __ip_tunnel_create(net, itn->rtnl_link_ops, parms); 352 dev = __ip_tunnel_create(net, itn->rtnl_link_ops, parms);
351 if (IS_ERR(dev)) 353 if (IS_ERR(dev))
352 return ERR_CAST(dev); 354 return ERR_CAST(dev);
353 355
354 dev->mtu = ip_tunnel_bind_dev(dev); 356 mtu = ip_tunnel_bind_dev(dev);
357 err = dev_set_mtu(dev, mtu);
358 if (err)
359 goto err_dev_set_mtu;
355 360
356 nt = netdev_priv(dev); 361 nt = netdev_priv(dev);
357 t_hlen = nt->hlen + sizeof(struct iphdr); 362 t_hlen = nt->hlen + sizeof(struct iphdr);
@@ -359,6 +364,10 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
359 dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; 364 dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
360 ip_tunnel_add(itn, nt); 365 ip_tunnel_add(itn, nt);
361 return nt; 366 return nt;
367
368err_dev_set_mtu:
369 unregister_netdevice(dev);
370 return ERR_PTR(err);
362} 371}
363 372
364int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, 373int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
@@ -1090,17 +1099,29 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
1090 nt->fwmark = fwmark; 1099 nt->fwmark = fwmark;
1091 err = register_netdevice(dev); 1100 err = register_netdevice(dev);
1092 if (err) 1101 if (err)
1093 goto out; 1102 goto err_register_netdevice;
1094 1103
1095 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS]) 1104 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
1096 eth_hw_addr_random(dev); 1105 eth_hw_addr_random(dev);
1097 1106
1098 mtu = ip_tunnel_bind_dev(dev); 1107 mtu = ip_tunnel_bind_dev(dev);
1099 if (!tb[IFLA_MTU]) 1108 if (tb[IFLA_MTU]) {
1100 dev->mtu = mtu; 1109 unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen;
1110
1111 mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
1112 (unsigned int)(max - sizeof(struct iphdr)));
1113 }
1114
1115 err = dev_set_mtu(dev, mtu);
1116 if (err)
1117 goto err_dev_set_mtu;
1101 1118
1102 ip_tunnel_add(itn, nt); 1119 ip_tunnel_add(itn, nt);
1103out: 1120 return 0;
1121
1122err_dev_set_mtu:
1123 unregister_netdevice(dev);
1124err_register_netdevice:
1104 return err; 1125 return err;
1105} 1126}
1106EXPORT_SYMBOL_GPL(ip_tunnel_newlink); 1127EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 51b1669334fe..3f091ccad9af 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -387,8 +387,6 @@ static int vti_tunnel_init(struct net_device *dev)
387 memcpy(dev->dev_addr, &iph->saddr, 4); 387 memcpy(dev->dev_addr, &iph->saddr, 4);
388 memcpy(dev->broadcast, &iph->daddr, 4); 388 memcpy(dev->broadcast, &iph->daddr, 4);
389 389
390 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
391 dev->mtu = ETH_DATA_LEN;
392 dev->flags = IFF_NOARP; 390 dev->flags = IFF_NOARP;
393 dev->addr_len = 4; 391 dev->addr_len = 4;
394 dev->features |= NETIF_F_LLTX; 392 dev->features |= NETIF_F_LLTX;
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 62ede5e3a3de..7523ddb2566b 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
29obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o 29obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
30 30
31nf_nat_snmp_basic-y := nf_nat_snmp_basic-asn1.o nf_nat_snmp_basic_main.o 31nf_nat_snmp_basic-y := nf_nat_snmp_basic-asn1.o nf_nat_snmp_basic_main.o
32nf_nat_snmp_basic-y : nf_nat_snmp_basic-asn1.h nf_nat_snmp_basic-asn1.c 32$(obj)/nf_nat_snmp_basic_main.o: $(obj)/nf_nat_snmp_basic-asn1.h
33obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o 33obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
34clean-files := nf_nat_snmp_basic-asn1.c nf_nat_snmp_basic-asn1.h 34clean-files := nf_nat_snmp_basic-asn1.c nf_nat_snmp_basic-asn1.h
35 35
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index b50721d9d30e..9db988f9a4d7 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -154,8 +154,20 @@ static unsigned int ipv4_conntrack_local(void *priv,
154 struct sk_buff *skb, 154 struct sk_buff *skb,
155 const struct nf_hook_state *state) 155 const struct nf_hook_state *state)
156{ 156{
157 if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */ 157 if (ip_is_fragment(ip_hdr(skb))) { /* IP_NODEFRAG setsockopt set */
158 enum ip_conntrack_info ctinfo;
159 struct nf_conn *tmpl;
160
161 tmpl = nf_ct_get(skb, &ctinfo);
162 if (tmpl && nf_ct_is_template(tmpl)) {
163 /* when skipping ct, clear templates to avoid fooling
164 * later targets/matches
165 */
166 skb->_nfct = 0;
167 nf_ct_put(tmpl);
168 }
158 return NF_ACCEPT; 169 return NF_ACCEPT;
170 }
159 171
160 return nf_conntrack_in(state->net, PF_INET, state->hook, skb); 172 return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
161} 173}
diff --git a/net/ipv4/netfilter/nf_socket_ipv4.c b/net/ipv4/netfilter/nf_socket_ipv4.c
index e9293bdebba0..4824b1e183a1 100644
--- a/net/ipv4/netfilter/nf_socket_ipv4.c
+++ b/net/ipv4/netfilter/nf_socket_ipv4.c
@@ -108,10 +108,12 @@ struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb,
108 int doff = 0; 108 int doff = 0;
109 109
110 if (iph->protocol == IPPROTO_UDP || iph->protocol == IPPROTO_TCP) { 110 if (iph->protocol == IPPROTO_UDP || iph->protocol == IPPROTO_TCP) {
111 struct udphdr _hdr, *hp; 111 struct tcphdr _hdr;
112 struct udphdr *hp;
112 113
113 hp = skb_header_pointer(skb, ip_hdrlen(skb), 114 hp = skb_header_pointer(skb, ip_hdrlen(skb),
114 sizeof(_hdr), &_hdr); 115 iph->protocol == IPPROTO_UDP ?
116 sizeof(*hp) : sizeof(_hdr), &_hdr);
115 if (hp == NULL) 117 if (hp == NULL)
116 return NULL; 118 return NULL;
117 119
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index fda37f2862c9..c3387dfd725b 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -349,6 +349,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
349 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; 349 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
350 treq->snt_synack = 0; 350 treq->snt_synack = 0;
351 treq->tfo_listener = false; 351 treq->tfo_listener = false;
352 if (IS_ENABLED(CONFIG_SMC))
353 ireq->smc_ok = 0;
352 354
353 ireq->ir_iif = inet_request_bound_dev_if(sk, skb); 355 ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
354 356
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 451ef3012636..367def6ddeda 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6255,6 +6255,9 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
6255 if (want_cookie && !tmp_opt.saw_tstamp) 6255 if (want_cookie && !tmp_opt.saw_tstamp)
6256 tcp_clear_options(&tmp_opt); 6256 tcp_clear_options(&tmp_opt);
6257 6257
6258 if (IS_ENABLED(CONFIG_SMC) && want_cookie)
6259 tmp_opt.smc_ok = 0;
6260
6258 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; 6261 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
6259 tcp_openreq_init(req, &tmp_opt, skb, sk); 6262 tcp_openreq_init(req, &tmp_opt, skb, sk);
6260 inet_rsk(req)->no_srccheck = inet_sk(sk)->transparent; 6263 inet_rsk(req)->no_srccheck = inet_sk(sk)->transparent;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 323d7a354ffb..e6eaa4dd9f60 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1246,7 +1246,7 @@ static int __ip6_append_data(struct sock *sk,
1246 const struct sockcm_cookie *sockc) 1246 const struct sockcm_cookie *sockc)
1247{ 1247{
1248 struct sk_buff *skb, *skb_prev = NULL; 1248 struct sk_buff *skb, *skb_prev = NULL;
1249 unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu; 1249 unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
1250 int exthdrlen = 0; 1250 int exthdrlen = 0;
1251 int dst_exthdrlen = 0; 1251 int dst_exthdrlen = 0;
1252 int hh_len; 1252 int hh_len;
@@ -1283,6 +1283,12 @@ static int __ip6_append_data(struct sock *sk,
1283 sizeof(struct frag_hdr) : 0) + 1283 sizeof(struct frag_hdr) : 0) +
1284 rt->rt6i_nfheader_len; 1284 rt->rt6i_nfheader_len;
1285 1285
1286 /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
1287 * the first fragment
1288 */
1289 if (headersize + transhdrlen > mtu)
1290 goto emsgsize;
1291
1286 if (cork->length + length > mtu - headersize && ipc6->dontfrag && 1292 if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
1287 (sk->sk_protocol == IPPROTO_UDP || 1293 (sk->sk_protocol == IPPROTO_UDP ||
1288 sk->sk_protocol == IPPROTO_RAW)) { 1294 sk->sk_protocol == IPPROTO_RAW)) {
@@ -1298,9 +1304,8 @@ static int __ip6_append_data(struct sock *sk,
1298 1304
1299 if (cork->length + length > maxnonfragsize - headersize) { 1305 if (cork->length + length > maxnonfragsize - headersize) {
1300emsgsize: 1306emsgsize:
1301 ipv6_local_error(sk, EMSGSIZE, fl6, 1307 pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
1302 mtu - headersize + 1308 ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
1303 sizeof(struct ipv6hdr));
1304 return -EMSGSIZE; 1309 return -EMSGSIZE;
1305 } 1310 }
1306 1311
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 60b771f49fb5..6ebb2e8777f4 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -622,11 +622,12 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
622 return 0; 622 return 0;
623} 623}
624 624
625static void vti6_link_config(struct ip6_tnl *t) 625static void vti6_link_config(struct ip6_tnl *t, bool keep_mtu)
626{ 626{
627 struct net_device *dev = t->dev; 627 struct net_device *dev = t->dev;
628 struct __ip6_tnl_parm *p = &t->parms; 628 struct __ip6_tnl_parm *p = &t->parms;
629 struct net_device *tdev = NULL; 629 struct net_device *tdev = NULL;
630 int mtu;
630 631
631 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); 632 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
632 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); 633 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
@@ -640,6 +641,11 @@ static void vti6_link_config(struct ip6_tnl *t)
640 else 641 else
641 dev->flags &= ~IFF_POINTOPOINT; 642 dev->flags &= ~IFF_POINTOPOINT;
642 643
644 if (keep_mtu && dev->mtu) {
645 dev->mtu = clamp(dev->mtu, dev->min_mtu, dev->max_mtu);
646 return;
647 }
648
643 if (p->flags & IP6_TNL_F_CAP_XMIT) { 649 if (p->flags & IP6_TNL_F_CAP_XMIT) {
644 int strict = (ipv6_addr_type(&p->raddr) & 650 int strict = (ipv6_addr_type(&p->raddr) &
645 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)); 651 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
@@ -656,20 +662,25 @@ static void vti6_link_config(struct ip6_tnl *t)
656 tdev = __dev_get_by_index(t->net, p->link); 662 tdev = __dev_get_by_index(t->net, p->link);
657 663
658 if (tdev) 664 if (tdev)
659 dev->mtu = max_t(int, tdev->mtu - dev->hard_header_len, 665 mtu = tdev->mtu - sizeof(struct ipv6hdr);
660 IPV6_MIN_MTU); 666 else
667 mtu = ETH_DATA_LEN - LL_MAX_HEADER - sizeof(struct ipv6hdr);
668
669 dev->mtu = max_t(int, mtu, IPV6_MIN_MTU);
661} 670}
662 671
663/** 672/**
664 * vti6_tnl_change - update the tunnel parameters 673 * vti6_tnl_change - update the tunnel parameters
665 * @t: tunnel to be changed 674 * @t: tunnel to be changed
666 * @p: tunnel configuration parameters 675 * @p: tunnel configuration parameters
676 * @keep_mtu: MTU was set from userspace, don't re-compute it
667 * 677 *
668 * Description: 678 * Description:
669 * vti6_tnl_change() updates the tunnel parameters 679 * vti6_tnl_change() updates the tunnel parameters
670 **/ 680 **/
671static int 681static int
672vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p) 682vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
683 bool keep_mtu)
673{ 684{
674 t->parms.laddr = p->laddr; 685 t->parms.laddr = p->laddr;
675 t->parms.raddr = p->raddr; 686 t->parms.raddr = p->raddr;
@@ -679,11 +690,12 @@ vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
679 t->parms.proto = p->proto; 690 t->parms.proto = p->proto;
680 t->parms.fwmark = p->fwmark; 691 t->parms.fwmark = p->fwmark;
681 dst_cache_reset(&t->dst_cache); 692 dst_cache_reset(&t->dst_cache);
682 vti6_link_config(t); 693 vti6_link_config(t, keep_mtu);
683 return 0; 694 return 0;
684} 695}
685 696
686static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) 697static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p,
698 bool keep_mtu)
687{ 699{
688 struct net *net = dev_net(t->dev); 700 struct net *net = dev_net(t->dev);
689 struct vti6_net *ip6n = net_generic(net, vti6_net_id); 701 struct vti6_net *ip6n = net_generic(net, vti6_net_id);
@@ -691,7 +703,7 @@ static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
691 703
692 vti6_tnl_unlink(ip6n, t); 704 vti6_tnl_unlink(ip6n, t);
693 synchronize_net(); 705 synchronize_net();
694 err = vti6_tnl_change(t, p); 706 err = vti6_tnl_change(t, p, keep_mtu);
695 vti6_tnl_link(ip6n, t); 707 vti6_tnl_link(ip6n, t);
696 netdev_state_change(t->dev); 708 netdev_state_change(t->dev);
697 return err; 709 return err;
@@ -804,7 +816,7 @@ vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
804 } else 816 } else
805 t = netdev_priv(dev); 817 t = netdev_priv(dev);
806 818
807 err = vti6_update(t, &p1); 819 err = vti6_update(t, &p1, false);
808 } 820 }
809 if (t) { 821 if (t) {
810 err = 0; 822 err = 0;
@@ -866,10 +878,8 @@ static void vti6_dev_setup(struct net_device *dev)
866 dev->priv_destructor = vti6_dev_free; 878 dev->priv_destructor = vti6_dev_free;
867 879
868 dev->type = ARPHRD_TUNNEL6; 880 dev->type = ARPHRD_TUNNEL6;
869 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
870 dev->mtu = ETH_DATA_LEN;
871 dev->min_mtu = IPV6_MIN_MTU; 881 dev->min_mtu = IPV6_MIN_MTU;
872 dev->max_mtu = IP_MAX_MTU; 882 dev->max_mtu = IP_MAX_MTU - sizeof(struct ipv6hdr);
873 dev->flags |= IFF_NOARP; 883 dev->flags |= IFF_NOARP;
874 dev->addr_len = sizeof(struct in6_addr); 884 dev->addr_len = sizeof(struct in6_addr);
875 netif_keep_dst(dev); 885 netif_keep_dst(dev);
@@ -905,7 +915,7 @@ static int vti6_dev_init(struct net_device *dev)
905 915
906 if (err) 916 if (err)
907 return err; 917 return err;
908 vti6_link_config(t); 918 vti6_link_config(t, true);
909 return 0; 919 return 0;
910} 920}
911 921
@@ -1010,7 +1020,7 @@ static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
1010 } else 1020 } else
1011 t = netdev_priv(dev); 1021 t = netdev_priv(dev);
1012 1022
1013 return vti6_update(t, &p); 1023 return vti6_update(t, &p, tb && tb[IFLA_MTU]);
1014} 1024}
1015 1025
1016static size_t vti6_get_size(const struct net_device *dev) 1026static size_t vti6_get_size(const struct net_device *dev)
diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c
index ebb2bf84232a..f14de4b6d639 100644
--- a/net/ipv6/netfilter/nf_socket_ipv6.c
+++ b/net/ipv6/netfilter/nf_socket_ipv6.c
@@ -116,9 +116,11 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
116 } 116 }
117 117
118 if (tproto == IPPROTO_UDP || tproto == IPPROTO_TCP) { 118 if (tproto == IPPROTO_UDP || tproto == IPPROTO_TCP) {
119 struct udphdr _hdr, *hp; 119 struct tcphdr _hdr;
120 struct udphdr *hp;
120 121
121 hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); 122 hp = skb_header_pointer(skb, thoff, tproto == IPPROTO_UDP ?
123 sizeof(*hp) : sizeof(_hdr), &_hdr);
122 if (hp == NULL) 124 if (hp == NULL)
123 return NULL; 125 return NULL;
124 126
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e461ef1158b6..f239f91d2efb 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -923,6 +923,9 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net,
923 struct rt6_info *rt, *rt_cache; 923 struct rt6_info *rt, *rt_cache;
924 struct fib6_node *fn; 924 struct fib6_node *fn;
925 925
926 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
927 flags &= ~RT6_LOOKUP_F_IFACE;
928
926 rcu_read_lock(); 929 rcu_read_lock();
927 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 930 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
928restart: 931restart:
@@ -1631,11 +1634,10 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
1631 struct neighbour *neigh; 1634 struct neighbour *neigh;
1632 __u8 neigh_flags = 0; 1635 __u8 neigh_flags = 0;
1633 1636
1634 neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway); 1637 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
1635 if (neigh) { 1638 if (neigh)
1636 neigh_flags = neigh->flags; 1639 neigh_flags = neigh->flags;
1637 neigh_release(neigh); 1640
1638 }
1639 if (!(neigh_flags & NTF_ROUTER)) { 1641 if (!(neigh_flags & NTF_ROUTER)) {
1640 RT6_TRACE("purging route %p via non-router but gateway\n", 1642 RT6_TRACE("purging route %p via non-router but gateway\n",
1641 rt); 1643 rt);
@@ -1659,7 +1661,8 @@ void rt6_age_exceptions(struct rt6_info *rt,
1659 if (!rcu_access_pointer(rt->rt6i_exception_bucket)) 1661 if (!rcu_access_pointer(rt->rt6i_exception_bucket))
1660 return; 1662 return;
1661 1663
1662 spin_lock_bh(&rt6_exception_lock); 1664 rcu_read_lock_bh();
1665 spin_lock(&rt6_exception_lock);
1663 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1666 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1664 lockdep_is_held(&rt6_exception_lock)); 1667 lockdep_is_held(&rt6_exception_lock));
1665 1668
@@ -1673,7 +1676,8 @@ void rt6_age_exceptions(struct rt6_info *rt,
1673 bucket++; 1676 bucket++;
1674 } 1677 }
1675 } 1678 }
1676 spin_unlock_bh(&rt6_exception_lock); 1679 spin_unlock(&rt6_exception_lock);
1680 rcu_read_unlock_bh();
1677} 1681}
1678 1682
1679struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, 1683struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 7a78dcfda68a..f343e6f0fc95 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -16,6 +16,7 @@
16#include <linux/net.h> 16#include <linux/net.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <net/ip.h> 18#include <net/ip.h>
19#include <net/ip_tunnels.h>
19#include <net/lwtunnel.h> 20#include <net/lwtunnel.h>
20#include <net/netevent.h> 21#include <net/netevent.h>
21#include <net/netns/generic.h> 22#include <net/netns/generic.h>
@@ -211,11 +212,6 @@ static int seg6_do_srh(struct sk_buff *skb)
211 212
212 tinfo = seg6_encap_lwtunnel(dst->lwtstate); 213 tinfo = seg6_encap_lwtunnel(dst->lwtstate);
213 214
214 if (likely(!skb->encapsulation)) {
215 skb_reset_inner_headers(skb);
216 skb->encapsulation = 1;
217 }
218
219 switch (tinfo->mode) { 215 switch (tinfo->mode) {
220 case SEG6_IPTUN_MODE_INLINE: 216 case SEG6_IPTUN_MODE_INLINE:
221 if (skb->protocol != htons(ETH_P_IPV6)) 217 if (skb->protocol != htons(ETH_P_IPV6))
@@ -224,10 +220,12 @@ static int seg6_do_srh(struct sk_buff *skb)
224 err = seg6_do_srh_inline(skb, tinfo->srh); 220 err = seg6_do_srh_inline(skb, tinfo->srh);
225 if (err) 221 if (err)
226 return err; 222 return err;
227
228 skb_reset_inner_headers(skb);
229 break; 223 break;
230 case SEG6_IPTUN_MODE_ENCAP: 224 case SEG6_IPTUN_MODE_ENCAP:
225 err = iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6);
226 if (err)
227 return err;
228
231 if (skb->protocol == htons(ETH_P_IPV6)) 229 if (skb->protocol == htons(ETH_P_IPV6))
232 proto = IPPROTO_IPV6; 230 proto = IPPROTO_IPV6;
233 else if (skb->protocol == htons(ETH_P_IP)) 231 else if (skb->protocol == htons(ETH_P_IP))
@@ -239,6 +237,8 @@ static int seg6_do_srh(struct sk_buff *skb)
239 if (err) 237 if (err)
240 return err; 238 return err;
241 239
240 skb_set_inner_transport_header(skb, skb_transport_offset(skb));
241 skb_set_inner_protocol(skb, skb->protocol);
242 skb->protocol = htons(ETH_P_IPV6); 242 skb->protocol = htons(ETH_P_IPV6);
243 break; 243 break;
244 case SEG6_IPTUN_MODE_L2ENCAP: 244 case SEG6_IPTUN_MODE_L2ENCAP:
@@ -262,8 +262,6 @@ static int seg6_do_srh(struct sk_buff *skb)
262 ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); 262 ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
263 skb_set_transport_header(skb, sizeof(struct ipv6hdr)); 263 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
264 264
265 skb_set_inner_protocol(skb, skb->protocol);
266
267 return 0; 265 return 0;
268} 266}
269 267
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index e7a3a6b6cf56..e997141aed8c 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -217,6 +217,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
217 treq->snt_isn = cookie; 217 treq->snt_isn = cookie;
218 treq->ts_off = 0; 218 treq->ts_off = 0;
219 treq->txhash = net_tx_rndhash(); 219 treq->txhash = net_tx_rndhash();
220 if (IS_ENABLED(CONFIG_SMC))
221 ireq->smc_ok = 0;
220 222
221 /* 223 /*
222 * We need to lookup the dst_entry to get the correct window size. 224 * We need to lookup the dst_entry to get the correct window size.
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index f59648018060..163121192aca 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -389,7 +389,7 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb)
389 llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); 389 llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
390 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); 390 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
391 if (likely(!rc)) { 391 if (likely(!rc)) {
392 llc_conn_send_pdu(sk, skb); 392 rc = llc_conn_send_pdu(sk, skb);
393 llc_conn_ac_inc_vs_by_1(sk, skb); 393 llc_conn_ac_inc_vs_by_1(sk, skb);
394 } 394 }
395 return rc; 395 return rc;
@@ -916,7 +916,7 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
916 llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR); 916 llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
917 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); 917 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
918 if (likely(!rc)) { 918 if (likely(!rc)) {
919 llc_conn_send_pdu(sk, skb); 919 rc = llc_conn_send_pdu(sk, skb);
920 llc_conn_ac_inc_vs_by_1(sk, skb); 920 llc_conn_ac_inc_vs_by_1(sk, skb);
921 } 921 }
922 return rc; 922 return rc;
@@ -935,14 +935,17 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
935int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb) 935int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb)
936{ 936{
937 struct llc_sock *llc = llc_sk(sk); 937 struct llc_sock *llc = llc_sk(sk);
938 int ret;
938 939
939 if (llc->ack_must_be_send) { 940 if (llc->ack_must_be_send) {
940 llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb); 941 ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
941 llc->ack_must_be_send = 0 ; 942 llc->ack_must_be_send = 0 ;
942 llc->ack_pf = 0; 943 llc->ack_pf = 0;
943 } else 944 } else {
944 llc_conn_ac_send_i_cmd_p_set_0(sk, skb); 945 ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
945 return 0; 946 }
947
948 return ret;
946} 949}
947 950
948/** 951/**
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 9177dbb16dce..110e32bcb399 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -30,7 +30,7 @@
30#endif 30#endif
31 31
32static int llc_find_offset(int state, int ev_type); 32static int llc_find_offset(int state, int ev_type);
33static void llc_conn_send_pdus(struct sock *sk); 33static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
34static int llc_conn_service(struct sock *sk, struct sk_buff *skb); 34static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
35static int llc_exec_conn_trans_actions(struct sock *sk, 35static int llc_exec_conn_trans_actions(struct sock *sk,
36 struct llc_conn_state_trans *trans, 36 struct llc_conn_state_trans *trans,
@@ -193,11 +193,11 @@ out_skb_put:
193 return rc; 193 return rc;
194} 194}
195 195
196void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb) 196int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
197{ 197{
198 /* queue PDU to send to MAC layer */ 198 /* queue PDU to send to MAC layer */
199 skb_queue_tail(&sk->sk_write_queue, skb); 199 skb_queue_tail(&sk->sk_write_queue, skb);
200 llc_conn_send_pdus(sk); 200 return llc_conn_send_pdus(sk, skb);
201} 201}
202 202
203/** 203/**
@@ -255,7 +255,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
255 if (howmany_resend > 0) 255 if (howmany_resend > 0)
256 llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; 256 llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
257 /* any PDUs to re-send are queued up; start sending to MAC */ 257 /* any PDUs to re-send are queued up; start sending to MAC */
258 llc_conn_send_pdus(sk); 258 llc_conn_send_pdus(sk, NULL);
259out:; 259out:;
260} 260}
261 261
@@ -296,7 +296,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
296 if (howmany_resend > 0) 296 if (howmany_resend > 0)
297 llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; 297 llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
298 /* any PDUs to re-send are queued up; start sending to MAC */ 298 /* any PDUs to re-send are queued up; start sending to MAC */
299 llc_conn_send_pdus(sk); 299 llc_conn_send_pdus(sk, NULL);
300out:; 300out:;
301} 301}
302 302
@@ -340,12 +340,16 @@ out:
340/** 340/**
341 * llc_conn_send_pdus - Sends queued PDUs 341 * llc_conn_send_pdus - Sends queued PDUs
342 * @sk: active connection 342 * @sk: active connection
343 * @hold_skb: the skb held by caller, or NULL if does not care
343 * 344 *
344 * Sends queued pdus to MAC layer for transmission. 345 * Sends queued pdus to MAC layer for transmission. When @hold_skb is
346 * NULL, always return 0. Otherwise, return 0 if @hold_skb is sent
347 * successfully, or 1 for failure.
345 */ 348 */
346static void llc_conn_send_pdus(struct sock *sk) 349static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
347{ 350{
348 struct sk_buff *skb; 351 struct sk_buff *skb;
352 int ret = 0;
349 353
350 while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) { 354 while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
351 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 355 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
@@ -357,10 +361,20 @@ static void llc_conn_send_pdus(struct sock *sk)
357 skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb); 361 skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
358 if (!skb2) 362 if (!skb2)
359 break; 363 break;
360 skb = skb2; 364 dev_queue_xmit(skb2);
365 } else {
366 bool is_target = skb == hold_skb;
367 int rc;
368
369 if (is_target)
370 skb_get(skb);
371 rc = dev_queue_xmit(skb);
372 if (is_target)
373 ret = rc;
361 } 374 }
362 dev_queue_xmit(skb);
363 } 375 }
376
377 return ret;
364} 378}
365 379
366/** 380/**
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 6e93782bbe4f..9134cc429ad4 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -74,15 +74,77 @@ static void nft_trans_destroy(struct nft_trans *trans)
74 kfree(trans); 74 kfree(trans);
75} 75}
76 76
77/* removal requests are queued in the commit_list, but not acted upon
78 * until after all new rules are in place.
79 *
80 * Therefore, nf_register_net_hook(net, &nat_hook) runs before pending
81 * nf_unregister_net_hook().
82 *
83 * nf_register_net_hook thus fails if a nat hook is already in place
84 * even if the conflicting hook is about to be removed.
85 *
86 * If collision is detected, search commit_log for DELCHAIN matching
87 * the new nat hooknum; if we find one collision is temporary:
88 *
89 * Either transaction is aborted (new/colliding hook is removed), or
90 * transaction is committed (old hook is removed).
91 */
92static bool nf_tables_allow_nat_conflict(const struct net *net,
93 const struct nf_hook_ops *ops)
94{
95 const struct nft_trans *trans;
96 bool ret = false;
97
98 if (!ops->nat_hook)
99 return false;
100
101 list_for_each_entry(trans, &net->nft.commit_list, list) {
102 const struct nf_hook_ops *pending_ops;
103 const struct nft_chain *pending;
104
105 if (trans->msg_type != NFT_MSG_NEWCHAIN &&
106 trans->msg_type != NFT_MSG_DELCHAIN)
107 continue;
108
109 pending = trans->ctx.chain;
110 if (!nft_is_base_chain(pending))
111 continue;
112
113 pending_ops = &nft_base_chain(pending)->ops;
114 if (pending_ops->nat_hook &&
115 pending_ops->pf == ops->pf &&
116 pending_ops->hooknum == ops->hooknum) {
117 /* other hook registration already pending? */
118 if (trans->msg_type == NFT_MSG_NEWCHAIN)
119 return false;
120
121 ret = true;
122 }
123 }
124
125 return ret;
126}
127
77static int nf_tables_register_hook(struct net *net, 128static int nf_tables_register_hook(struct net *net,
78 const struct nft_table *table, 129 const struct nft_table *table,
79 struct nft_chain *chain) 130 struct nft_chain *chain)
80{ 131{
132 struct nf_hook_ops *ops;
133 int ret;
134
81 if (table->flags & NFT_TABLE_F_DORMANT || 135 if (table->flags & NFT_TABLE_F_DORMANT ||
82 !nft_is_base_chain(chain)) 136 !nft_is_base_chain(chain))
83 return 0; 137 return 0;
84 138
85 return nf_register_net_hook(net, &nft_base_chain(chain)->ops); 139 ops = &nft_base_chain(chain)->ops;
140 ret = nf_register_net_hook(net, ops);
141 if (ret == -EBUSY && nf_tables_allow_nat_conflict(net, ops)) {
142 ops->nat_hook = false;
143 ret = nf_register_net_hook(net, ops);
144 ops->nat_hook = true;
145 }
146
147 return ret;
86} 148}
87 149
88static void nf_tables_unregister_hook(struct net *net, 150static void nf_tables_unregister_hook(struct net *net,
@@ -1226,8 +1288,6 @@ static void nf_tables_chain_destroy(struct nft_ctx *ctx)
1226 free_percpu(basechain->stats); 1288 free_percpu(basechain->stats);
1227 if (basechain->stats) 1289 if (basechain->stats)
1228 static_branch_dec(&nft_counters_enabled); 1290 static_branch_dec(&nft_counters_enabled);
1229 if (basechain->ops.dev != NULL)
1230 dev_put(basechain->ops.dev);
1231 kfree(chain->name); 1291 kfree(chain->name);
1232 kfree(basechain); 1292 kfree(basechain);
1233 } else { 1293 } else {
@@ -1294,7 +1354,7 @@ static int nft_chain_parse_hook(struct net *net,
1294 } 1354 }
1295 1355
1296 nla_strlcpy(ifname, ha[NFTA_HOOK_DEV], IFNAMSIZ); 1356 nla_strlcpy(ifname, ha[NFTA_HOOK_DEV], IFNAMSIZ);
1297 dev = dev_get_by_name(net, ifname); 1357 dev = __dev_get_by_name(net, ifname);
1298 if (!dev) { 1358 if (!dev) {
1299 module_put(type->owner); 1359 module_put(type->owner);
1300 return -ENOENT; 1360 return -ENOENT;
@@ -1311,8 +1371,6 @@ static int nft_chain_parse_hook(struct net *net,
1311static void nft_chain_release_hook(struct nft_chain_hook *hook) 1371static void nft_chain_release_hook(struct nft_chain_hook *hook)
1312{ 1372{
1313 module_put(hook->type->owner); 1373 module_put(hook->type->owner);
1314 if (hook->dev != NULL)
1315 dev_put(hook->dev);
1316} 1374}
1317 1375
1318static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, 1376static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
@@ -1915,6 +1973,7 @@ static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
1915 [NFTA_RULE_POSITION] = { .type = NLA_U64 }, 1973 [NFTA_RULE_POSITION] = { .type = NLA_U64 },
1916 [NFTA_RULE_USERDATA] = { .type = NLA_BINARY, 1974 [NFTA_RULE_USERDATA] = { .type = NLA_BINARY,
1917 .len = NFT_USERDATA_MAXLEN }, 1975 .len = NFT_USERDATA_MAXLEN },
1976 [NFTA_RULE_ID] = { .type = NLA_U32 },
1918}; 1977};
1919 1978
1920static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, 1979static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
@@ -2450,6 +2509,9 @@ EXPORT_SYMBOL_GPL(nft_unregister_set);
2450 2509
2451static bool nft_set_ops_candidate(const struct nft_set_ops *ops, u32 flags) 2510static bool nft_set_ops_candidate(const struct nft_set_ops *ops, u32 flags)
2452{ 2511{
2512 if ((flags & NFT_SET_EVAL) && !ops->update)
2513 return false;
2514
2453 return (flags & ops->features) == (flags & NFT_SET_FEATURES); 2515 return (flags & ops->features) == (flags & NFT_SET_FEATURES);
2454} 2516}
2455 2517
@@ -2514,7 +2576,7 @@ nft_select_set_ops(const struct nft_ctx *ctx,
2514 if (est.space == best.space && 2576 if (est.space == best.space &&
2515 est.lookup < best.lookup) 2577 est.lookup < best.lookup)
2516 break; 2578 break;
2517 } else if (est.size < best.size) { 2579 } else if (est.size < best.size || !bops) {
2518 break; 2580 break;
2519 } 2581 }
2520 continue; 2582 continue;
@@ -3319,6 +3381,8 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
3319 [NFTA_SET_ELEM_TIMEOUT] = { .type = NLA_U64 }, 3381 [NFTA_SET_ELEM_TIMEOUT] = { .type = NLA_U64 },
3320 [NFTA_SET_ELEM_USERDATA] = { .type = NLA_BINARY, 3382 [NFTA_SET_ELEM_USERDATA] = { .type = NLA_BINARY,
3321 .len = NFT_USERDATA_MAXLEN }, 3383 .len = NFT_USERDATA_MAXLEN },
3384 [NFTA_SET_ELEM_EXPR] = { .type = NLA_NESTED },
3385 [NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING },
3322}; 3386};
3323 3387
3324static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = { 3388static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
@@ -4868,8 +4932,6 @@ nf_tables_flowtable_lookup_byhandle(const struct nft_table *table,
4868 return ERR_PTR(-ENOENT); 4932 return ERR_PTR(-ENOENT);
4869} 4933}
4870 4934
4871#define NFT_FLOWTABLE_DEVICE_MAX 8
4872
4873static int nf_tables_parse_devices(const struct nft_ctx *ctx, 4935static int nf_tables_parse_devices(const struct nft_ctx *ctx,
4874 const struct nlattr *attr, 4936 const struct nlattr *attr,
4875 struct net_device *dev_array[], int *len) 4937 struct net_device *dev_array[], int *len)
@@ -4886,7 +4948,7 @@ static int nf_tables_parse_devices(const struct nft_ctx *ctx,
4886 } 4948 }
4887 4949
4888 nla_strlcpy(ifname, tmp, IFNAMSIZ); 4950 nla_strlcpy(ifname, tmp, IFNAMSIZ);
4889 dev = dev_get_by_name(ctx->net, ifname); 4951 dev = __dev_get_by_name(ctx->net, ifname);
4890 if (!dev) { 4952 if (!dev) {
4891 err = -ENOENT; 4953 err = -ENOENT;
4892 goto err1; 4954 goto err1;
@@ -4942,13 +5004,11 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
4942 err = nf_tables_parse_devices(ctx, tb[NFTA_FLOWTABLE_HOOK_DEVS], 5004 err = nf_tables_parse_devices(ctx, tb[NFTA_FLOWTABLE_HOOK_DEVS],
4943 dev_array, &n); 5005 dev_array, &n);
4944 if (err < 0) 5006 if (err < 0)
4945 goto err1; 5007 return err;
4946 5008
4947 ops = kzalloc(sizeof(struct nf_hook_ops) * n, GFP_KERNEL); 5009 ops = kzalloc(sizeof(struct nf_hook_ops) * n, GFP_KERNEL);
4948 if (!ops) { 5010 if (!ops)
4949 err = -ENOMEM; 5011 return -ENOMEM;
4950 goto err1;
4951 }
4952 5012
4953 flowtable->hooknum = hooknum; 5013 flowtable->hooknum = hooknum;
4954 flowtable->priority = priority; 5014 flowtable->priority = priority;
@@ -4962,13 +5022,10 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
4962 flowtable->ops[i].priv = &flowtable->data.rhashtable; 5022 flowtable->ops[i].priv = &flowtable->data.rhashtable;
4963 flowtable->ops[i].hook = flowtable->data.type->hook; 5023 flowtable->ops[i].hook = flowtable->data.type->hook;
4964 flowtable->ops[i].dev = dev_array[i]; 5024 flowtable->ops[i].dev = dev_array[i];
5025 flowtable->dev_name[i] = kstrdup(dev_array[i]->name,
5026 GFP_KERNEL);
4965 } 5027 }
4966 5028
4967 err = 0;
4968err1:
4969 for (i = 0; i < n; i++)
4970 dev_put(dev_array[i]);
4971
4972 return err; 5029 return err;
4973} 5030}
4974 5031
@@ -5139,8 +5196,10 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
5139err5: 5196err5:
5140 i = flowtable->ops_len; 5197 i = flowtable->ops_len;
5141err4: 5198err4:
5142 for (k = i - 1; k >= 0; k--) 5199 for (k = i - 1; k >= 0; k--) {
5200 kfree(flowtable->dev_name[k]);
5143 nf_unregister_net_hook(net, &flowtable->ops[k]); 5201 nf_unregister_net_hook(net, &flowtable->ops[k]);
5202 }
5144 5203
5145 kfree(flowtable->ops); 5204 kfree(flowtable->ops);
5146err3: 5205err3:
@@ -5230,9 +5289,9 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
5230 goto nla_put_failure; 5289 goto nla_put_failure;
5231 5290
5232 for (i = 0; i < flowtable->ops_len; i++) { 5291 for (i = 0; i < flowtable->ops_len; i++) {
5233 if (flowtable->ops[i].dev && 5292 if (flowtable->dev_name[i][0] &&
5234 nla_put_string(skb, NFTA_DEVICE_NAME, 5293 nla_put_string(skb, NFTA_DEVICE_NAME,
5235 flowtable->ops[i].dev->name)) 5294 flowtable->dev_name[i]))
5236 goto nla_put_failure; 5295 goto nla_put_failure;
5237 } 5296 }
5238 nla_nest_end(skb, nest_devs); 5297 nla_nest_end(skb, nest_devs);
@@ -5474,6 +5533,7 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev,
5474 continue; 5533 continue;
5475 5534
5476 nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]); 5535 nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]);
5536 flowtable->dev_name[i][0] = '\0';
5477 flowtable->ops[i].dev = NULL; 5537 flowtable->ops[i].dev = NULL;
5478 break; 5538 break;
5479 } 5539 }
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index d40591fe1b2f..fc9c6d5d64cd 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -674,7 +674,7 @@ static const struct nft_set_ops *
674nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc, 674nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc,
675 u32 flags) 675 u32 flags)
676{ 676{
677 if (desc->size && !(flags & NFT_SET_TIMEOUT)) { 677 if (desc->size && !(flags & (NFT_SET_EVAL | NFT_SET_TIMEOUT))) {
678 switch (desc->klen) { 678 switch (desc->klen) {
679 case 4: 679 case 4:
680 return &nft_hash_fast_ops; 680 return &nft_hash_fast_ops;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f1b02d87e336..fa556fdef57d 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1085,6 +1085,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1085 if (addr->sa_family != AF_NETLINK) 1085 if (addr->sa_family != AF_NETLINK)
1086 return -EINVAL; 1086 return -EINVAL;
1087 1087
1088 if (alen < sizeof(struct sockaddr_nl))
1089 return -EINVAL;
1090
1088 if ((nladdr->nl_groups || nladdr->nl_pid) && 1091 if ((nladdr->nl_groups || nladdr->nl_pid) &&
1089 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) 1092 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1090 return -EPERM; 1093 return -EPERM;
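
The af_netlink.c hunk adds a length check before netlink_connect() reads the caller-supplied address. A small userspace-flavoured sketch of the same validation pattern, checking the length before casting and dereferencing, might be:

#include <errno.h>
#include <sys/socket.h>
#include <linux/netlink.h>

/* Reject a short address before any of its fields are read; only then is it
 * safe to look at nl_family (and later nl_pid/nl_groups). */
static int check_nl_addr(const struct sockaddr *addr, socklen_t alen)
{
	const struct sockaddr_nl *nladdr = (const struct sockaddr_nl *)addr;

	if (alen < sizeof(struct sockaddr_nl))
		return -EINVAL;
	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	return 0;
}
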
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 0d78b58e1898..72251241665a 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -171,8 +171,10 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
171 continue; 171 continue;
172 172
173 nest = nla_nest_start(skb, n_i); 173 nest = nla_nest_start(skb, n_i);
174 if (!nest) 174 if (!nest) {
175 index--;
175 goto nla_put_failure; 176 goto nla_put_failure;
177 }
176 err = tcf_action_dump_1(skb, p, 0, 0); 178 err = tcf_action_dump_1(skb, p, 0, 0);
177 if (err < 0) { 179 if (err < 0) {
178 index--; 180 index--;
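
The act_api.c change rolls the dump index back when nla_nest_start() fails, so a resumed dump retries the element that did not fit instead of silently skipping it. A generic sketch of that cursor-rollback pattern (hypothetical names, not the kernel API):

#include <stdbool.h>

/* The resume cursor (*index) may only move past elements that were fully
 * serialized; on failure the increment is undone so the next dump pass
 * retries exactly the element that did not fit. */
static int dump_walker(bool (*emit)(int idx), int start, int count, int *index)
{
	int i;

	for (i = start; i < count; i++) {
		(*index)++;
		if (!emit(i)) {
			(*index)--;   /* roll back, retry this element next time */
			return -1;
		}
	}

	return 0;
}
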
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 7e3fbe9cc936..39c144b6ff98 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -373,24 +373,33 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
373 */ 373 */
374static inline bool qdisc_restart(struct Qdisc *q, int *packets) 374static inline bool qdisc_restart(struct Qdisc *q, int *packets)
375{ 375{
376 bool more, validate, nolock = q->flags & TCQ_F_NOLOCK;
376 spinlock_t *root_lock = NULL; 377 spinlock_t *root_lock = NULL;
377 struct netdev_queue *txq; 378 struct netdev_queue *txq;
378 struct net_device *dev; 379 struct net_device *dev;
379 struct sk_buff *skb; 380 struct sk_buff *skb;
380 bool validate;
381 381
382 /* Dequeue packet */ 382 /* Dequeue packet */
383 if (nolock && test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
384 return false;
385
383 skb = dequeue_skb(q, &validate, packets); 386 skb = dequeue_skb(q, &validate, packets);
384 if (unlikely(!skb)) 387 if (unlikely(!skb)) {
388 if (nolock)
389 clear_bit(__QDISC_STATE_RUNNING, &q->state);
385 return false; 390 return false;
391 }
386 392
387 if (!(q->flags & TCQ_F_NOLOCK)) 393 if (!nolock)
388 root_lock = qdisc_lock(q); 394 root_lock = qdisc_lock(q);
389 395
390 dev = qdisc_dev(q); 396 dev = qdisc_dev(q);
391 txq = skb_get_tx_queue(dev, skb); 397 txq = skb_get_tx_queue(dev, skb);
392 398
393 return sch_direct_xmit(skb, q, dev, txq, root_lock, validate); 399 more = sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
400 if (nolock)
401 clear_bit(__QDISC_STATE_RUNNING, &q->state);
402 return more;
394} 403}
395 404
396void __qdisc_run(struct Qdisc *q) 405void __qdisc_run(struct Qdisc *q)
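
The sch_generic.c hunk makes the lockless (TCQ_F_NOLOCK) path claim a RUNNING bit before dequeueing and release it on every exit, including the empty-queue one. A compilable C11 sketch of that serialization pattern, with hypothetical names, is:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag running = ATOMIC_FLAG_INIT;

/* Whoever wins the test-and-set does one dequeue+transmit pass; the flag is
 * cleared on every exit path, including the one where the queue was empty. */
static bool run_once(bool (*dequeue_and_xmit)(void))
{
	bool more;

	if (atomic_flag_test_and_set(&running))
		return false;              /* someone else is already running it */

	more = dequeue_and_xmit();         /* false when nothing was queued */

	atomic_flag_clear(&running);
	return more;
}
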
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 64fbc3230e6c..3a988c22f627 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -304,7 +304,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
304 304
305 /* receive the complete CLC message */ 305 /* receive the complete CLC message */
306 memset(&msg, 0, sizeof(struct msghdr)); 306 memset(&msg, 0, sizeof(struct msghdr));
307 iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, buflen); 307 iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen);
308 krflags = MSG_WAITALL; 308 krflags = MSG_WAITALL;
309 smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME; 309 smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
310 len = sock_recvmsg(smc->clcsock, &msg, krflags); 310 len = sock_recvmsg(smc->clcsock, &msg, krflags);
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 1fdab5c4eda8..b9283ce5cd85 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -60,7 +60,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
60 struct sock *sk = strp->sk; 60 struct sock *sk = strp->sk;
61 61
62 /* Report an error on the lower socket */ 62 /* Report an error on the lower socket */
63 sk->sk_err = err; 63 sk->sk_err = -err;
64 sk->sk_error_report(sk); 64 sk->sk_error_report(sk);
65 } 65 }
66} 66}
@@ -458,7 +458,7 @@ static void strp_msg_timeout(struct work_struct *w)
458 /* Message assembly timed out */ 458 /* Message assembly timed out */
459 STRP_STATS_INCR(strp->stats.msg_timeouts); 459 STRP_STATS_INCR(strp->stats.msg_timeouts);
460 strp->cb.lock(strp); 460 strp->cb.lock(strp);
461 strp->cb.abort_parser(strp, ETIMEDOUT); 461 strp->cb.abort_parser(strp, -ETIMEDOUT);
462 strp->cb.unlock(strp); 462 strp->cb.unlock(strp);
463} 463}
464 464
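
The strparser.c fix is purely about errno sign conventions: the abort callback is now handed a negative errno (-ETIMEDOUT), and sk_err stores the positive form, so the sign is flipped exactly once at the boundary. A tiny sketch of that convention (fake_sock is a stand-in, not the kernel struct):

#include <errno.h>

struct fake_sock { int sk_err; };      /* stand-in for struct sock */

/* err arrives as a negative errno (e.g. -ETIMEDOUT); sk_err conventionally
 * holds the positive value, so the sign is flipped exactly once here. */
static void report_abort(struct fake_sock *sk, int err)
{
	sk->sk_err = -err;
}

/* usage: report_abort(&s, -ETIMEDOUT) leaves s.sk_err == ETIMEDOUT */
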
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 44fc54dc013c..352abca2605f 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -27,6 +27,12 @@ struct xfrm_trans_tasklet {
27}; 27};
28 28
29struct xfrm_trans_cb { 29struct xfrm_trans_cb {
30 union {
31 struct inet_skb_parm h4;
32#if IS_ENABLED(CONFIG_IPV6)
33 struct inet6_skb_parm h6;
34#endif
35 } header;
30 int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb); 36 int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
31}; 37};
32 38
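
The xfrm_input.c hunk reserves room for the IPv4/IPv6 per-packet state at the start of the private xfrm_trans_cb, presumably so the code invoked after the tasklet still finds its own control block at offset 0 of skb->cb[]. A stand-alone sketch of that layout trick (the *_parm structs below are placeholders with made-up sizes, not the kernel definitions):

/* Placeholders for the per-packet state the IPv4/IPv6 input path keeps in
 * the socket buffer's control block. */
struct ip4_parm { unsigned char data[24]; };
struct ip6_parm { unsigned char data[48]; };

/* Private control block: the layer that runs afterwards expects its own
 * parm struct at offset 0, so that space is reserved with a leading union. */
struct my_trans_cb {
	union {
		struct ip4_parm h4;
		struct ip6_parm h6;
	} header;                               /* must remain the first member */
	int (*finish)(void *net, void *sk, void *skb);
};
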
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 23468672a767..89b178a78dc7 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -285,8 +285,9 @@ void xfrm_local_error(struct sk_buff *skb, int mtu)
285 return; 285 return;
286 286
287 afinfo = xfrm_state_get_afinfo(proto); 287 afinfo = xfrm_state_get_afinfo(proto);
288 if (afinfo) 288 if (afinfo) {
289 afinfo->local_error(skb, mtu); 289 afinfo->local_error(skb, mtu);
290 rcu_read_unlock(); 290 rcu_read_unlock();
291 }
291} 292}
292EXPORT_SYMBOL_GPL(xfrm_local_error); 293EXPORT_SYMBOL_GPL(xfrm_local_error);
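
In the xfrm_output.c hunk, rcu_read_unlock() moves inside the if-branch because the lookup holds the read-side lock only when it returns a non-NULL afinfo; unlocking unconditionally would unbalance the lock on the failure path. A minimal sketch of that conditional lock pairing (get_afinfo_locked() and read_unlock_ref() are made-up names):

/* get_afinfo_locked() takes a read-side lock only when it returns non-NULL,
 * so the unlock must live in the same branch as the use. */
struct afinfo {
	void (*local_error)(void *skb, unsigned int mtu);
};

extern struct afinfo *get_afinfo_locked(int proto);
extern void read_unlock_ref(void);

static void report_local_error(void *skb, unsigned int mtu, int proto)
{
	struct afinfo *afinfo = get_afinfo_locked(proto);

	if (afinfo) {
		afinfo->local_error(skb, mtu);
		read_unlock_ref();   /* paired with the lock taken on success */
	}
}
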
diff --git a/scripts/adjust_autoksyms.sh b/scripts/adjust_autoksyms.sh
index 513da1a4a2da..d67830e6e360 100755
--- a/scripts/adjust_autoksyms.sh
+++ b/scripts/adjust_autoksyms.sh
@@ -84,6 +84,13 @@ while read sympath; do
84 depfile="include/config/ksym/${sympath}.h" 84 depfile="include/config/ksym/${sympath}.h"
85 mkdir -p "$(dirname "$depfile")" 85 mkdir -p "$(dirname "$depfile")"
86 touch "$depfile" 86 touch "$depfile"
87 # Filesystems with coarse time precision may create timestamps
88 # equal to the one from a file that was very recently built and that
89 # needs to be rebuild. Let's guard against that by making sure our
90 # dep files are always newer than the first file we created here.
91 while [ ! "$depfile" -nt "$new_ksyms_file" ]; do
92 touch "$depfile"
93 done
87 echo $((count += 1)) 94 echo $((count += 1))
88done | tail -1 ) 95done | tail -1 )
89changed=${changed:-0} 96changed=${changed:-0}
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index b4f0f2b3f8d2..13fabb1f81db 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -313,7 +313,7 @@ fi
313 313
314# Build kernel header package 314# Build kernel header package
315(cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles" 315(cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles"
316(cd $srctree; find arch/*/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles" 316(cd $srctree; find arch/*/include include scripts -type f -o -type l) >> "$objtree/debian/hdrsrcfiles"
317(cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles" 317(cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles"
318(cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles" 318(cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles"
319if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then 319if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 280027fad991..61427c6f2209 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -98,7 +98,7 @@ $M make %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} KBUILD_SRC= modules_instal
98$S$M rm -f %{buildroot}/lib/modules/$KERNELRELEASE/build 98$S$M rm -f %{buildroot}/lib/modules/$KERNELRELEASE/build
99$S$M rm -f %{buildroot}/lib/modules/$KERNELRELEASE/source 99$S$M rm -f %{buildroot}/lib/modules/$KERNELRELEASE/source
100$S$M mkdir -p %{buildroot}/usr/src/kernels/$KERNELRELEASE 100$S$M mkdir -p %{buildroot}/usr/src/kernels/$KERNELRELEASE
101$S$M tar cf - . $EXCLUDES | tar xf - -C %{buildroot}/usr/src/kernels/$KERNELRELEASE 101$S$M tar cf - $EXCLUDES . | tar xf - -C %{buildroot}/usr/src/kernels/$KERNELRELEASE
102$S$M cd %{buildroot}/lib/modules/$KERNELRELEASE 102$S$M cd %{buildroot}/lib/modules/$KERNELRELEASE
103$S$M ln -sf /usr/src/kernels/$KERNELRELEASE build 103$S$M ln -sf /usr/src/kernels/$KERNELRELEASE build
104$S$M ln -sf /usr/src/kernels/$KERNELRELEASE source 104$S$M ln -sf /usr/src/kernels/$KERNELRELEASE source
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 02298c9c6020..441405081195 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1326,7 +1326,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
1326static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const char __user *buf, size_t bytes) 1326static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const char __user *buf, size_t bytes)
1327{ 1327{
1328 size_t xfer = 0; 1328 size_t xfer = 0;
1329 ssize_t tmp; 1329 ssize_t tmp = 0;
1330 struct snd_pcm_runtime *runtime = substream->runtime; 1330 struct snd_pcm_runtime *runtime = substream->runtime;
1331 1331
1332 if (atomic_read(&substream->mmap_count)) 1332 if (atomic_read(&substream->mmap_count))
@@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
1433static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __user *buf, size_t bytes) 1433static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __user *buf, size_t bytes)
1434{ 1434{
1435 size_t xfer = 0; 1435 size_t xfer = 0;
1436 ssize_t tmp; 1436 ssize_t tmp = 0;
1437 struct snd_pcm_runtime *runtime = substream->runtime; 1437 struct snd_pcm_runtime *runtime = substream->runtime;
1438 1438
1439 if (atomic_read(&substream->mmap_count)) 1439 if (atomic_read(&substream->mmap_count))
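
The pcm_oss.c change initializes tmp to 0 because the error exit returns it, and that exit can be reached before tmp is ever assigned. A small self-contained sketch of the same pattern, with hypothetical callbacks:

#include <stdbool.h>

/* tmp doubles as the return value on the error exit; it starts at 0 because
 * that exit can be reached before the first chunk is ever written. */
static long write_loop(bool (*ready)(void), long (*write_chunk)(void))
{
	long xfer = 0;
	long tmp = 0;

	while (ready()) {
		tmp = write_chunk();
		if (tmp < 0)
			goto err;
		xfer += tmp;
	}
	return xfer;

err:
	return xfer > 0 ? xfer : tmp;
}
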
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 77ba50ddcf9e..d18b3982548b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3422,7 +3422,7 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
3422 area, 3422 area,
3423 substream->runtime->dma_area, 3423 substream->runtime->dma_area,
3424 substream->runtime->dma_addr, 3424 substream->runtime->dma_addr,
3425 area->vm_end - area->vm_start); 3425 substream->runtime->dma_bytes);
3426#endif /* CONFIG_X86 */ 3426#endif /* CONFIG_X86 */
3427 /* mmap with fault handler */ 3427 /* mmap with fault handler */
3428 area->vm_ops = &snd_pcm_vm_ops_data_fault; 3428 area->vm_ops = &snd_pcm_vm_ops_data_fault;
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 0333143a1fa7..1063a4377502 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -192,6 +192,11 @@ static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
192 dpcm->timer.expires = 0; 192 dpcm->timer.expires = 0;
193} 193}
194 194
195static inline void loopback_timer_stop_sync(struct loopback_pcm *dpcm)
196{
197 del_timer_sync(&dpcm->timer);
198}
199
195#define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK) 200#define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK)
196#define CABLE_VALID_CAPTURE (1 << SNDRV_PCM_STREAM_CAPTURE) 201#define CABLE_VALID_CAPTURE (1 << SNDRV_PCM_STREAM_CAPTURE)
197#define CABLE_VALID_BOTH (CABLE_VALID_PLAYBACK|CABLE_VALID_CAPTURE) 202#define CABLE_VALID_BOTH (CABLE_VALID_PLAYBACK|CABLE_VALID_CAPTURE)
@@ -326,6 +331,8 @@ static int loopback_prepare(struct snd_pcm_substream *substream)
326 struct loopback_cable *cable = dpcm->cable; 331 struct loopback_cable *cable = dpcm->cable;
327 int bps, salign; 332 int bps, salign;
328 333
334 loopback_timer_stop_sync(dpcm);
335
329 salign = (snd_pcm_format_width(runtime->format) * 336 salign = (snd_pcm_format_width(runtime->format) *
330 runtime->channels) / 8; 337 runtime->channels) / 8;
331 bps = salign * runtime->rate; 338 bps = salign * runtime->rate;
@@ -659,7 +666,9 @@ static void free_cable(struct snd_pcm_substream *substream)
659 return; 666 return;
660 if (cable->streams[!substream->stream]) { 667 if (cable->streams[!substream->stream]) {
661 /* other stream is still alive */ 668 /* other stream is still alive */
669 spin_lock_irq(&cable->lock);
662 cable->streams[substream->stream] = NULL; 670 cable->streams[substream->stream] = NULL;
671 spin_unlock_irq(&cable->lock);
663 } else { 672 } else {
664 /* free the cable */ 673 /* free the cable */
665 loopback->cables[substream->number][dev] = NULL; 674 loopback->cables[substream->number][dev] = NULL;
@@ -698,7 +707,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
698 loopback->cables[substream->number][dev] = cable; 707 loopback->cables[substream->number][dev] = cable;
699 } 708 }
700 dpcm->cable = cable; 709 dpcm->cable = cable;
701 cable->streams[substream->stream] = dpcm;
702 710
703 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); 711 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
704 712
@@ -730,6 +738,11 @@ static int loopback_open(struct snd_pcm_substream *substream)
730 runtime->hw = loopback_pcm_hardware; 738 runtime->hw = loopback_pcm_hardware;
731 else 739 else
732 runtime->hw = cable->hw; 740 runtime->hw = cable->hw;
741
742 spin_lock_irq(&cable->lock);
743 cable->streams[substream->stream] = dpcm;
744 spin_unlock_irq(&cable->lock);
745
733 unlock: 746 unlock:
734 if (err < 0) { 747 if (err < 0) {
735 free_cable(substream); 748 free_cable(substream);
@@ -744,7 +757,7 @@ static int loopback_close(struct snd_pcm_substream *substream)
744 struct loopback *loopback = substream->private_data; 757 struct loopback *loopback = substream->private_data;
745 struct loopback_pcm *dpcm = substream->runtime->private_data; 758 struct loopback_pcm *dpcm = substream->runtime->private_data;
746 759
747 loopback_timer_stop(dpcm); 760 loopback_timer_stop_sync(dpcm);
748 mutex_lock(&loopback->cable_lock); 761 mutex_lock(&loopback->cable_lock);
749 free_cable(substream); 762 free_cable(substream);
750 mutex_unlock(&loopback->cable_lock); 763 mutex_unlock(&loopback->cable_lock);
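
The aloop.c hunks combine two fixes: waiting synchronously for a running timer callback before the stream is reconfigured or torn down, and updating cable->streams[] only under the cable spinlock so the timer handler never sees a half-detached peer. A kernel-style sketch of both ideas (struct cable and detach_stream() are illustrative, not the driver's actual code, and this will not build outside a kernel tree):

#include <linux/spinlock.h>
#include <linux/timer.h>

struct cable {
	spinlock_t lock;
	void *streams[2];
};

/* Wait for an in-flight timer handler to finish before the stream goes away,
 * then clear the published pointer under the lock the handler also takes. */
static void detach_stream(struct cable *c, int dir, struct timer_list *t)
{
	del_timer_sync(t);              /* returns after any running callback */

	spin_lock_irq(&c->lock);
	c->streams[dir] = NULL;
	spin_unlock_irq(&c->lock);
}
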
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index d5017adf9feb..c507c69029e3 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -375,6 +375,7 @@ enum {
375 ((pci)->device == 0x160c)) 375 ((pci)->device == 0x160c))
376 376
377#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) 377#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
378#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
378 379
379static char *driver_short_names[] = { 380static char *driver_short_names[] = {
380 [AZX_DRIVER_ICH] = "HDA Intel", 381 [AZX_DRIVER_ICH] = "HDA Intel",
@@ -1744,6 +1745,10 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
1744 else 1745 else
1745 chip->bdl_pos_adj = bdl_pos_adj[dev]; 1746 chip->bdl_pos_adj = bdl_pos_adj[dev];
1746 1747
1748 /* Workaround for a communication error on CFL (bko#199007) */
1749 if (IS_CFL(pci))
1750 chip->polling_mode = 1;
1751
1747 err = azx_bus_init(chip, model[dev], &pci_hda_io_ops); 1752 err = azx_bus_init(chip, model[dev], &pci_hda_io_ops);
1748 if (err < 0) { 1753 if (err < 0) {
1749 kfree(hda); 1754 kfree(hda);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9af301c6bba2..aef1f52db7d9 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3130,6 +3130,8 @@ static void alc256_init(struct hda_codec *codec)
3130 3130
3131 alc_update_coef_idx(codec, 0x46, 3 << 12, 0); 3131 alc_update_coef_idx(codec, 0x46, 3 << 12, 0);
3132 alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */ 3132 alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */
3133 alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 1 << 15); /* Clear bit */
3134 alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15);
3133} 3135}
3134 3136
3135static void alc256_shutup(struct hda_codec *codec) 3137static void alc256_shutup(struct hda_codec *codec)
@@ -3596,8 +3598,12 @@ static void alc269_fixup_mic_mute_hook(void *private_data, int enabled)
3596 pinval = snd_hda_codec_get_pin_target(codec, spec->mute_led_nid); 3598 pinval = snd_hda_codec_get_pin_target(codec, spec->mute_led_nid);
3597 pinval &= ~AC_PINCTL_VREFEN; 3599 pinval &= ~AC_PINCTL_VREFEN;
3598 pinval |= enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80; 3600 pinval |= enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80;
3599 if (spec->mute_led_nid) 3601 if (spec->mute_led_nid) {
3602 /* temporarily power up/down for setting VREF */
3603 snd_hda_power_up_pm(codec);
3600 snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval); 3604 snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval);
3605 snd_hda_power_down_pm(codec);
3606 }
3601} 3607}
3602 3608
3603/* Make sure the led works even in runtime suspend */ 3609/* Make sure the led works even in runtime suspend */
@@ -5497,6 +5503,7 @@ enum {
5497 ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, 5503 ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
5498 ALC298_FIXUP_TPT470_DOCK, 5504 ALC298_FIXUP_TPT470_DOCK,
5499 ALC255_FIXUP_DUMMY_LINEOUT_VERB, 5505 ALC255_FIXUP_DUMMY_LINEOUT_VERB,
5506 ALC255_FIXUP_DELL_HEADSET_MIC,
5500}; 5507};
5501 5508
5502static const struct hda_fixup alc269_fixups[] = { 5509static const struct hda_fixup alc269_fixups[] = {
@@ -6357,6 +6364,13 @@ static const struct hda_fixup alc269_fixups[] = {
6357 .chained = true, 6364 .chained = true,
6358 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE 6365 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
6359 }, 6366 },
6367 [ALC255_FIXUP_DELL_HEADSET_MIC] = {
6368 .type = HDA_FIXUP_PINS,
6369 .v.pins = (const struct hda_pintbl[]) {
6370 { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
6371 { }
6372 },
6373 },
6360}; 6374};
6361 6375
6362static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6376static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6411,6 +6425,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6411 SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 6425 SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
6412 SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), 6426 SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
6413 SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), 6427 SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
6428 SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
6429 SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
6414 SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB), 6430 SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
6415 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6431 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6416 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6432 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -7154,6 +7170,8 @@ static int patch_alc269(struct hda_codec *codec)
7154 break; 7170 break;
7155 case 0x10ec0257: 7171 case 0x10ec0257:
7156 spec->codec_variant = ALC269_TYPE_ALC257; 7172 spec->codec_variant = ALC269_TYPE_ALC257;
7173 spec->shutup = alc256_shutup;
7174 spec->init_hook = alc256_init;
7157 spec->gen.mixer_nid = 0; 7175 spec->gen.mixer_nid = 0;
7158 break; 7176 break;
7159 case 0x10ec0215: 7177 case 0x10ec0215:
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index ea8f3de92fa4..794224e1d6df 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1171,6 +1171,7 @@ static bool is_teac_dsd_dac(unsigned int id)
1171 switch (id) { 1171 switch (id) {
1172 case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */ 1172 case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
1173 case USB_ID(0x0644, 0x8044): /* Esoteric D-05X */ 1173 case USB_ID(0x0644, 0x8044): /* Esoteric D-05X */
1174 case USB_ID(0x0644, 0x804a): /* TEAC UD-301 */
1174 return true; 1175 return true;
1175 } 1176 }
1176 return false; 1177 return false;
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index f95fa67bb498..f509c86faede 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -428,7 +428,7 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
428 jsonw_string_field(json_wtr, "name", info->name); 428 jsonw_string_field(json_wtr, "name", info->name);
429 429
430 jsonw_name(json_wtr, "flags"); 430 jsonw_name(json_wtr, "flags");
431 jsonw_printf(json_wtr, "%#x", info->map_flags); 431 jsonw_printf(json_wtr, "%d", info->map_flags);
432 432
433 print_dev_json(info->ifindex, info->netns_dev, info->netns_ino); 433 print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
434 434
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
new file mode 100644
index 000000000000..5ba73035e1d9
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
@@ -0,0 +1,46 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: Kprobe event string type argument

[ -f kprobe_events ] || exit_unsupported # this is configurable

echo 0 > events/enable
echo > kprobe_events

case `uname -m` in
x86_64)
  ARG2=%si
  OFFS=8
;;
i[3456]86)
  ARG2=%cx
  OFFS=4
;;
aarch64)
  ARG2=%x1
  OFFS=8
;;
arm*)
  ARG2=%r1
  OFFS=4
;;
*)
  echo "Please implement other architecture here"
  exit_untested
esac

: "Test get argument (1)"
echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string" > kprobe_events
echo 1 > events/kprobes/testprobe/enable
! echo test >> kprobe_events
tail -n 1 trace | grep -qe "testprobe.* arg1=\"test\""

echo 0 > events/kprobes/testprobe/enable
: "Test get argument (2)"
echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string arg2=+0(+${OFFS}(${ARG2})):string" > kprobe_events
echo 1 > events/kprobes/testprobe/enable
! echo test1 test2 >> kprobe_events
tail -n 1 trace | grep -qe "testprobe.* arg1=\"test1\" arg2=\"test2\""

echo 0 > events/enable
echo > kprobe_events
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
new file mode 100644
index 000000000000..231bcd2c4eb5
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
@@ -0,0 +1,97 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: Kprobe event argument syntax

[ -f kprobe_events ] || exit_unsupported # this is configurable

grep "x8/16/32/64" README > /dev/null || exit_unsupported # version issue

echo 0 > events/enable
echo > kprobe_events

PROBEFUNC="vfs_read"
GOODREG=
BADREG=
GOODSYM="_sdata"
if ! grep -qw ${GOODSYM} /proc/kallsyms ; then
  GOODSYM=$PROBEFUNC
fi
BADSYM="deaqswdefr"
SYMADDR=0x`grep -w ${GOODSYM} /proc/kallsyms | cut -f 1 -d " "`
GOODTYPE="x16"
BADTYPE="y16"

case `uname -m` in
x86_64|i[3456]86)
  GOODREG=%ax
  BADREG=%ex
;;
aarch64)
  GOODREG=%x0
  BADREG=%ax
;;
arm*)
  GOODREG=%r0
  BADREG=%ax
;;
esac

test_goodarg() # Good-args
{
  while [ "$1" ]; do
    echo "p ${PROBEFUNC} $1" > kprobe_events
    shift 1
  done;
}

test_badarg() # Bad-args
{
  while [ "$1" ]; do
    ! echo "p ${PROBEFUNC} $1" > kprobe_events
    shift 1
  done;
}

echo > kprobe_events

: "Register access"
test_goodarg ${GOODREG}
test_badarg ${BADREG}

: "Symbol access"
test_goodarg "@${GOODSYM}" "@${SYMADDR}" "@${GOODSYM}+10" "@${GOODSYM}-10"
test_badarg "@" "@${BADSYM}" "@${GOODSYM}*10" "@${GOODSYM}/10" \
	"@${GOODSYM}%10" "@${GOODSYM}&10" "@${GOODSYM}|10"

: "Stack access"
test_goodarg "\$stack" "\$stack0" "\$stack1"
test_badarg "\$stackp" "\$stack0+10" "\$stack1-10"

: "Retval access"
echo "r ${PROBEFUNC} \$retval" > kprobe_events
! echo "p ${PROBEFUNC} \$retval" > kprobe_events

: "Comm access"
test_goodarg "\$comm"

: "Indirect memory access"
test_goodarg "+0(${GOODREG})" "-0(${GOODREG})" "+10(\$stack)" \
	"+0(\$stack1)" "+10(@${GOODSYM}-10)" "+0(+10(+20(\$stack)))"
test_badarg "+(${GOODREG})" "(${GOODREG}+10)" "-(${GOODREG})" "(${GOODREG})" \
	"+10(\$comm)" "+0(${GOODREG})+10"

: "Name assignment"
test_goodarg "varname=${GOODREG}"
test_badarg "varname=varname2=${GOODREG}"

: "Type syntax"
test_goodarg "${GOODREG}:${GOODTYPE}"
test_badarg "${GOODREG}::${GOODTYPE}" "${GOODREG}:${BADTYPE}" \
	"${GOODTYPE}:${GOODREG}"

: "Combination check"

test_goodarg "\$comm:string" "+0(\$stack):string"
test_badarg "\$comm:x64" "\$stack:string" "${GOODREG}:string"

echo > kprobe_events
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
new file mode 100644
index 000000000000..4fda01a08da4
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
@@ -0,0 +1,43 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: Kprobe events - probe points

[ -f kprobe_events ] || exit_unsupported # this is configurable

TARGET_FUNC=create_trace_kprobe

dec_addr() { # hexaddr
  printf "%d" "0x"`echo $1 | tail -c 8`
}

set_offs() { # prev target next
  A1=`dec_addr $1`
  A2=`dec_addr $2`
  A3=`dec_addr $3`
  TARGET="0x$2" # an address
  PREV=`expr $A1 - $A2` # offset to previous symbol
  NEXT=+`expr $A3 - $A2` # offset to next symbol
  OVERFLOW=+`printf "0x%x" ${PREV}` # overflow offset to previous symbol
}

# We have to decode symbol addresses to get correct offsets.
# If the offset is not an instruction boundary, it cause -EILSEQ.
set_offs `grep -A1 -B1 ${TARGET_FUNC} /proc/kallsyms | cut -f 1 -d " " | xargs`

UINT_TEST=no
# printf "%x" -1 returns (unsigned long)-1.
if [ `printf "%x" -1 | wc -c` != 9 ]; then
  UINT_TEST=yes
fi

echo 0 > events/enable
echo > kprobe_events
echo "p:testprobe ${TARGET_FUNC}" > kprobe_events
echo "p:testprobe ${TARGET}" > kprobe_events
echo "p:testprobe ${TARGET_FUNC}${NEXT}" > kprobe_events
! echo "p:testprobe ${TARGET_FUNC}${PREV}" > kprobe_events
if [ "${UINT_TEST}" = yes ]; then
! echo "p:testprobe ${TARGET_FUNC}${OVERFLOW}" > kprobe_events
fi
echo > kprobe_events
clear_trace
diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
index 1ae1c5a7392e..6f22238f3217 100644
--- a/tools/testing/selftests/x86/ptrace_syscall.c
+++ b/tools/testing/selftests/x86/ptrace_syscall.c
@@ -183,8 +183,10 @@ static void test_ptrace_syscall_restart(void)
183 if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0) 183 if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0)
184 err(1, "PTRACE_TRACEME"); 184 err(1, "PTRACE_TRACEME");
185 185
186 pid_t pid = getpid(), tid = syscall(SYS_gettid);
187
186 printf("\tChild will make one syscall\n"); 188 printf("\tChild will make one syscall\n");
187 raise(SIGSTOP); 189 syscall(SYS_tgkill, pid, tid, SIGSTOP);
188 190
189 syscall(SYS_gettid, 10, 11, 12, 13, 14, 15); 191 syscall(SYS_gettid, 10, 11, 12, 13, 14, 15);
190 _exit(0); 192 _exit(0);
@@ -301,9 +303,11 @@ static void test_restart_under_ptrace(void)
301 if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0) 303 if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0)
302 err(1, "PTRACE_TRACEME"); 304 err(1, "PTRACE_TRACEME");
303 305
306 pid_t pid = getpid(), tid = syscall(SYS_gettid);
307
304 printf("\tChild will take a nap until signaled\n"); 308 printf("\tChild will take a nap until signaled\n");
305 setsigign(SIGUSR1, SA_RESTART); 309 setsigign(SIGUSR1, SA_RESTART);
306 raise(SIGSTOP); 310 syscall(SYS_tgkill, pid, tid, SIGSTOP);
307 311
308 syscall(SYS_pause, 0, 0, 0, 0, 0, 0); 312 syscall(SYS_pause, 0, 0, 0, 0, 0, 0);
309 _exit(0); 313 _exit(0);
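
The ptrace_syscall.c change stops the child with a raw tgkill(2) aimed at its own pid/tid pair instead of raise(3), presumably so the child performs exactly one predictable syscall rather than whatever sequence the libc wrapper happens to make. A runnable sketch of that call sequence:

#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Stop the current thread with a raw tgkill(2) targeting this exact pid/tid
 * pair, without going through the libc raise(3) wrapper. */
static void stop_self(void)
{
	pid_t pid = getpid();
	pid_t tid = (pid_t)syscall(SYS_gettid);

	syscall(SYS_tgkill, pid, tid, SIGSTOP);
}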