aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--MAINTAINERS4
-rw-r--r--arch/arm/boot/dts/gemini-nas4220b.dts28
-rw-r--r--arch/arm/boot/dts/omap4.dtsi8
-rw-r--r--arch/arm/configs/gemini_defconfig27
-rw-r--r--arch/arm/configs/socfpga_defconfig1
-rw-r--r--arch/arm/mach-omap2/Makefile6
-rw-r--r--arch/arm/mach-omap2/pm-asm-offsets.c3
-rw-r--r--arch/arm/mach-omap2/sleep33xx.S1
-rw-r--r--arch/arm/mach-omap2/sleep43xx.S1
-rw-r--r--arch/arm/mach-s3c24xx/mach-jive.c4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts12
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl.dtsi61
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm.dtsi17
-rw-r--r--arch/arm64/boot/dts/arm/juno-motherboard.dtsi2
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi80
-rw-r--r--arch/riscv/Kconfig4
-rw-r--r--arch/riscv/include/asm/Kbuild1
-rw-r--r--arch/riscv/kernel/vdso/Makefile2
-rw-r--r--arch/s390/include/asm/thread_info.h3
-rw-r--r--arch/s390/kernel/module.c4
-rw-r--r--arch/s390/kernel/perf_cpum_cf_events.c8
-rw-r--r--arch/s390/kernel/process.c10
-rw-r--r--arch/s390/kernel/uprobes.c9
-rw-r--r--arch/x86/include/asm/ftrace.h19
-rw-r--r--block/bfq-iosched.c10
-rw-r--r--block/blk-cgroup.c28
-rw-r--r--block/blk-core.c15
-rw-r--r--block/blk-mq.c41
-rw-r--r--block/blk-mq.h3
-rw-r--r--drivers/acpi/acpi_video.c27
-rw-r--r--drivers/acpi/acpi_watchdog.c59
-rw-r--r--drivers/acpi/button.c24
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/acpi/sleep.c13
-rw-r--r--drivers/base/dma-coherent.c5
-rw-r--r--drivers/base/dma-mapping.c6
-rw-r--r--drivers/block/loop.c64
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/swim.c49
-rw-r--r--drivers/block/swim3.c6
-rw-r--r--drivers/bus/Kconfig1
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/random.c48
-rw-r--r--drivers/char/virtio_console.c157
-rw-r--r--drivers/cpufreq/Kconfig.arm10
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c323
-rw-r--r--drivers/firmware/arm_scmi/clock.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c54
-rw-r--r--drivers/gpu/drm/drm_edid.c11
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c16
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h4
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c11
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_format.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_kms.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c16
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c109
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c28
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c3
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c11
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c20
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h5
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h1
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c18
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c55
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c4
-rw-r--r--drivers/hwmon/k10temp.c17
-rw-r--r--drivers/hwmon/nct6683.c4
-rw-r--r--drivers/hwmon/scmi-hwmon.c5
-rw-r--r--drivers/memory/emif-asm-offsets.c72
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c33
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c9
-rw-r--r--drivers/mtd/nand/core.c3
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c25
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c2
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c19
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c16
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c24
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c196
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h9
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c80
-rw-r--r--drivers/net/ethernet/sfc/efx.c143
-rw-r--r--drivers/net/ethernet/sfc/efx.h21
-rw-r--r--drivers/net/ethernet/sfc/farch.c41
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h36
-rw-r--r--drivers/net/ethernet/sfc/rx.c62
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/phy/marvell.c9
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/team/team.c19
-rw-r--r--drivers/pci/dwc/pcie-kirin.c2
-rw-r--r--drivers/pci/host/pci-aardvark.c53
-rw-r--r--drivers/pci/pci-driver.c5
-rw-r--r--drivers/pci/pci.c4
-rw-r--r--drivers/s390/block/dasd_alias.c13
-rw-r--r--drivers/s390/cio/chsc.c14
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c19
-rw-r--r--drivers/s390/net/qeth_core.h2
-rw-r--r--drivers/s390/net/qeth_core_main.c158
-rw-r--r--drivers/s390/net/qeth_core_mpc.h12
-rw-r--r--drivers/s390/net/qeth_l2_main.c59
-rw-r--r--drivers/scsi/fnic/fnic_trace.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c6
-rw-r--r--drivers/scsi/scsi_debug.c33
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c29
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sd_zbc.c140
-rw-r--r--drivers/scsi/ufs/ufshcd.c40
-rw-r--r--drivers/soc/bcm/raspberrypi-power.c2
-rw-r--r--drivers/target/target_core_pscsi.c2
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blkdev.h6
-rw-r--r--include/linux/bpf.h4
-rw-r--r--include/linux/ethtool.h2
-rw-r--r--include/linux/fsnotify_backend.h4
-rw-r--r--include/linux/mtd/flashchip.h1
-rw-r--r--include/linux/ti-emif-sram.h75
-rw-r--r--include/linux/virtio.h3
-rw-r--r--include/net/ife.h3
-rw-r--r--include/net/llc_conn.h1
-rw-r--r--include/scsi/scsi_dbg.h2
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h4
-rw-r--r--include/trace/events/ufs.h27
-rw-r--r--include/trace/events/workqueue.h2
-rw-r--r--include/uapi/linux/virtio_balloon.h15
-rw-r--r--kernel/bpf/core.c45
-rw-r--r--kernel/bpf/sockmap.c3
-rw-r--r--kernel/kprobes.c2
-rw-r--r--kernel/trace/bpf_trace.c25
-rw-r--r--kernel/trace/trace_entries.h2
-rw-r--r--kernel/trace/trace_events_filter.c14
-rw-r--r--lib/dma-direct.c3
-rw-r--r--net/bridge/netfilter/ebtables.c11
-rw-r--r--net/ife/ife.c38
-rw-r--r--net/ipv4/tcp_input.c7
-rw-r--r--net/ipv6/netfilter/Kconfig55
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/seg6_iptunnel.c2
-rw-r--r--net/l2tp/l2tp_debugfs.c5
-rw-r--r--net/l2tp/l2tp_ppp.c12
-rw-r--r--net/llc/af_llc.c21
-rw-r--r--net/llc/llc_c_ac.c9
-rw-r--r--net/llc/llc_conn.c22
-rw-r--r--net/netfilter/Kconfig1
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c155
-rw-r--r--net/netfilter/nf_conntrack_expect.c5
-rw-r--r--net/netfilter/nf_conntrack_extend.c2
-rw-r--r--net/netfilter/nf_conntrack_sip.c16
-rw-r--r--net/netfilter/nf_tables_api.c69
-rw-r--r--net/netfilter/xt_connmark.c49
-rw-r--r--net/packet/af_packet.c60
-rw-r--r--net/packet/internal.h10
-rw-r--r--net/sched/act_ife.c9
-rw-r--r--net/strparser/strparser.c2
-rw-r--r--security/commoncap.c2
-rw-r--r--tools/testing/selftests/bpf/.gitignore3
-rw-r--r--tools/testing/selftests/bpf/test_sock.c1
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c1
-rwxr-xr-xtools/testing/selftests/bpf/test_sock_addr.sh4
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc44
189 files changed, 2533 insertions, 1514 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 92be777d060a..cc996af953b8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1208,7 +1208,6 @@ F: drivers/*/*alpine*
1208ARM/ARTPEC MACHINE SUPPORT 1208ARM/ARTPEC MACHINE SUPPORT
1209M: Jesper Nilsson <jesper.nilsson@axis.com> 1209M: Jesper Nilsson <jesper.nilsson@axis.com>
1210M: Lars Persson <lars.persson@axis.com> 1210M: Lars Persson <lars.persson@axis.com>
1211M: Niklas Cassel <niklas.cassel@axis.com>
1212S: Maintained 1211S: Maintained
1213L: linux-arm-kernel@axis.com 1212L: linux-arm-kernel@axis.com
1214F: arch/arm/mach-artpec 1213F: arch/arm/mach-artpec
@@ -2617,7 +2616,7 @@ S: Maintained
2617F: drivers/net/hamradio/baycom* 2616F: drivers/net/hamradio/baycom*
2618 2617
2619BCACHE (BLOCK LAYER CACHE) 2618BCACHE (BLOCK LAYER CACHE)
2620M: Michael Lyle <mlyle@lyle.org> 2619M: Coly Li <colyli@suse.de>
2621M: Kent Overstreet <kent.overstreet@gmail.com> 2620M: Kent Overstreet <kent.overstreet@gmail.com>
2622L: linux-bcache@vger.kernel.org 2621L: linux-bcache@vger.kernel.org
2623W: http://bcache.evilpiepirate.org 2622W: http://bcache.evilpiepirate.org
@@ -10909,7 +10908,6 @@ F: drivers/pci/host/
10909F: drivers/pci/dwc/ 10908F: drivers/pci/dwc/
10910 10909
10911PCIE DRIVER FOR AXIS ARTPEC 10910PCIE DRIVER FOR AXIS ARTPEC
10912M: Niklas Cassel <niklas.cassel@axis.com>
10913M: Jesper Nilsson <jesper.nilsson@axis.com> 10911M: Jesper Nilsson <jesper.nilsson@axis.com>
10914L: linux-arm-kernel@axis.com 10912L: linux-arm-kernel@axis.com
10915L: linux-pci@vger.kernel.org 10913L: linux-pci@vger.kernel.org
diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts
index 8bbb6f85d161..4785fbcc41ed 100644
--- a/arch/arm/boot/dts/gemini-nas4220b.dts
+++ b/arch/arm/boot/dts/gemini-nas4220b.dts
@@ -134,37 +134,37 @@
134 function = "gmii"; 134 function = "gmii";
135 groups = "gmii_gmac0_grp"; 135 groups = "gmii_gmac0_grp";
136 }; 136 };
137 /* Settings come from OpenWRT */ 137 /* Settings come from OpenWRT, pins on SL3516 */
138 conf0 { 138 conf0 {
139 pins = "R8 GMAC0 RXDV", "U11 GMAC1 RXDV"; 139 pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV";
140 skew-delay = <0>; 140 skew-delay = <0>;
141 }; 141 };
142 conf1 { 142 conf1 {
143 pins = "T8 GMAC0 RXC", "T11 GMAC1 RXC"; 143 pins = "Y7 GMAC0 RXC", "Y11 GMAC1 RXC";
144 skew-delay = <15>; 144 skew-delay = <15>;
145 }; 145 };
146 conf2 { 146 conf2 {
147 pins = "P8 GMAC0 TXEN", "V11 GMAC1 TXEN"; 147 pins = "T8 GMAC0 TXEN", "W11 GMAC1 TXEN";
148 skew-delay = <7>; 148 skew-delay = <7>;
149 }; 149 };
150 conf3 { 150 conf3 {
151 pins = "V7 GMAC0 TXC"; 151 pins = "U8 GMAC0 TXC";
152 skew-delay = <11>; 152 skew-delay = <11>;
153 }; 153 };
154 conf4 { 154 conf4 {
155 pins = "P10 GMAC1 TXC"; 155 pins = "V11 GMAC1 TXC";
156 skew-delay = <10>; 156 skew-delay = <10>;
157 }; 157 };
158 conf5 { 158 conf5 {
159 /* The data lines all have default skew */ 159 /* The data lines all have default skew */
160 pins = "U8 GMAC0 RXD0", "V8 GMAC0 RXD1", 160 pins = "W8 GMAC0 RXD0", "V9 GMAC0 RXD1",
161 "P9 GMAC0 RXD2", "R9 GMAC0 RXD3", 161 "Y8 GMAC0 RXD2", "U9 GMAC0 RXD3",
162 "U7 GMAC0 TXD0", "T7 GMAC0 TXD1", 162 "T7 GMAC0 TXD0", "U6 GMAC0 TXD1",
163 "R7 GMAC0 TXD2", "P7 GMAC0 TXD3", 163 "V7 GMAC0 TXD2", "U7 GMAC0 TXD3",
164 "R11 GMAC1 RXD0", "P11 GMAC1 RXD1", 164 "Y12 GMAC1 RXD0", "V12 GMAC1 RXD1",
165 "V12 GMAC1 RXD2", "U12 GMAC1 RXD3", 165 "T11 GMAC1 RXD2", "W12 GMAC1 RXD3",
166 "R10 GMAC1 TXD0", "T10 GMAC1 TXD1", 166 "U10 GMAC1 TXD0", "Y10 GMAC1 TXD1",
167 "U10 GMAC1 TXD2", "V10 GMAC1 TXD3"; 167 "W10 GMAC1 TXD2", "T9 GMAC1 TXD3";
168 skew-delay = <7>; 168 skew-delay = <7>;
169 }; 169 };
170 /* Set up drive strength on GMAC0 to 16 mA */ 170 /* Set up drive strength on GMAC0 to 16 mA */
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 475904894b86..e554b6e039f3 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -163,10 +163,10 @@
163 163
164 cm2: cm2@8000 { 164 cm2: cm2@8000 {
165 compatible = "ti,omap4-cm2", "simple-bus"; 165 compatible = "ti,omap4-cm2", "simple-bus";
166 reg = <0x8000 0x3000>; 166 reg = <0x8000 0x2000>;
167 #address-cells = <1>; 167 #address-cells = <1>;
168 #size-cells = <1>; 168 #size-cells = <1>;
169 ranges = <0 0x8000 0x3000>; 169 ranges = <0 0x8000 0x2000>;
170 170
171 cm2_clocks: clocks { 171 cm2_clocks: clocks {
172 #address-cells = <1>; 172 #address-cells = <1>;
@@ -250,11 +250,11 @@
250 250
251 prm: prm@6000 { 251 prm: prm@6000 {
252 compatible = "ti,omap4-prm"; 252 compatible = "ti,omap4-prm";
253 reg = <0x6000 0x3000>; 253 reg = <0x6000 0x2000>;
254 interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>; 254 interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
255 #address-cells = <1>; 255 #address-cells = <1>;
256 #size-cells = <1>; 256 #size-cells = <1>;
257 ranges = <0 0x6000 0x3000>; 257 ranges = <0 0x6000 0x2000>;
258 258
259 prm_clocks: clocks { 259 prm_clocks: clocks {
260 #address-cells = <1>; 260 #address-cells = <1>;
diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig
index 2a63fa10c813..553777ac2814 100644
--- a/arch/arm/configs/gemini_defconfig
+++ b/arch/arm/configs/gemini_defconfig
@@ -1,6 +1,7 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
3CONFIG_NO_HZ_IDLE=y 3CONFIG_NO_HZ_IDLE=y
4CONFIG_HIGH_RES_TIMERS=y
4CONFIG_BSD_PROCESS_ACCT=y 5CONFIG_BSD_PROCESS_ACCT=y
5CONFIG_USER_NS=y 6CONFIG_USER_NS=y
6CONFIG_RELAY=y 7CONFIG_RELAY=y
@@ -12,15 +13,21 @@ CONFIG_ARCH_GEMINI=y
12CONFIG_PCI=y 13CONFIG_PCI=y
13CONFIG_PREEMPT=y 14CONFIG_PREEMPT=y
14CONFIG_AEABI=y 15CONFIG_AEABI=y
16CONFIG_HIGHMEM=y
17CONFIG_CMA=y
15CONFIG_CMDLINE="console=ttyS0,115200n8" 18CONFIG_CMDLINE="console=ttyS0,115200n8"
16CONFIG_KEXEC=y 19CONFIG_KEXEC=y
17CONFIG_BINFMT_MISC=y 20CONFIG_BINFMT_MISC=y
18CONFIG_PM=y 21CONFIG_PM=y
22CONFIG_NET=y
23CONFIG_UNIX=y
24CONFIG_INET=y
19CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 25CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
20CONFIG_DEVTMPFS=y 26CONFIG_DEVTMPFS=y
21CONFIG_MTD=y 27CONFIG_MTD=y
22CONFIG_MTD_BLOCK=y 28CONFIG_MTD_BLOCK=y
23CONFIG_MTD_CFI=y 29CONFIG_MTD_CFI=y
30CONFIG_MTD_JEDECPROBE=y
24CONFIG_MTD_CFI_INTELEXT=y 31CONFIG_MTD_CFI_INTELEXT=y
25CONFIG_MTD_CFI_AMDSTD=y 32CONFIG_MTD_CFI_AMDSTD=y
26CONFIG_MTD_CFI_STAA=y 33CONFIG_MTD_CFI_STAA=y
@@ -33,6 +40,11 @@ CONFIG_BLK_DEV_SD=y
33# CONFIG_SCSI_LOWLEVEL is not set 40# CONFIG_SCSI_LOWLEVEL is not set
34CONFIG_ATA=y 41CONFIG_ATA=y
35CONFIG_PATA_FTIDE010=y 42CONFIG_PATA_FTIDE010=y
43CONFIG_NETDEVICES=y
44CONFIG_GEMINI_ETHERNET=y
45CONFIG_MDIO_BITBANG=y
46CONFIG_MDIO_GPIO=y
47CONFIG_REALTEK_PHY=y
36CONFIG_INPUT_EVDEV=y 48CONFIG_INPUT_EVDEV=y
37CONFIG_KEYBOARD_GPIO=y 49CONFIG_KEYBOARD_GPIO=y
38# CONFIG_INPUT_MOUSE is not set 50# CONFIG_INPUT_MOUSE is not set
@@ -43,9 +55,19 @@ CONFIG_SERIAL_8250_NR_UARTS=1
43CONFIG_SERIAL_8250_RUNTIME_UARTS=1 55CONFIG_SERIAL_8250_RUNTIME_UARTS=1
44CONFIG_SERIAL_OF_PLATFORM=y 56CONFIG_SERIAL_OF_PLATFORM=y
45# CONFIG_HW_RANDOM is not set 57# CONFIG_HW_RANDOM is not set
46# CONFIG_HWMON is not set 58CONFIG_I2C_GPIO=y
59CONFIG_SPI=y
60CONFIG_SPI_GPIO=y
61CONFIG_SENSORS_GPIO_FAN=y
62CONFIG_SENSORS_LM75=y
63CONFIG_THERMAL=y
47CONFIG_WATCHDOG=y 64CONFIG_WATCHDOG=y
48CONFIG_GEMINI_WATCHDOG=y 65CONFIG_REGULATOR=y
66CONFIG_REGULATOR_FIXED_VOLTAGE=y
67CONFIG_DRM=y
68CONFIG_DRM_PANEL_ILITEK_IL9322=y
69CONFIG_DRM_TVE200=y
70CONFIG_LOGO=y
49CONFIG_USB=y 71CONFIG_USB=y
50CONFIG_USB_MON=y 72CONFIG_USB_MON=y
51CONFIG_USB_FOTG210_HCD=y 73CONFIG_USB_FOTG210_HCD=y
@@ -54,6 +76,7 @@ CONFIG_NEW_LEDS=y
54CONFIG_LEDS_CLASS=y 76CONFIG_LEDS_CLASS=y
55CONFIG_LEDS_GPIO=y 77CONFIG_LEDS_GPIO=y
56CONFIG_LEDS_TRIGGERS=y 78CONFIG_LEDS_TRIGGERS=y
79CONFIG_LEDS_TRIGGER_DISK=y
57CONFIG_LEDS_TRIGGER_HEARTBEAT=y 80CONFIG_LEDS_TRIGGER_HEARTBEAT=y
58CONFIG_RTC_CLASS=y 81CONFIG_RTC_CLASS=y
59CONFIG_DMADEVICES=y 82CONFIG_DMADEVICES=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index 2620ce790db0..371fca4e1ab7 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -57,6 +57,7 @@ CONFIG_MTD_M25P80=y
57CONFIG_MTD_NAND=y 57CONFIG_MTD_NAND=y
58CONFIG_MTD_NAND_DENALI_DT=y 58CONFIG_MTD_NAND_DENALI_DT=y
59CONFIG_MTD_SPI_NOR=y 59CONFIG_MTD_SPI_NOR=y
60# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
60CONFIG_SPI_CADENCE_QUADSPI=y 61CONFIG_SPI_CADENCE_QUADSPI=y
61CONFIG_OF_OVERLAY=y 62CONFIG_OF_OVERLAY=y
62CONFIG_OF_CONFIGFS=y 63CONFIG_OF_CONFIGFS=y
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 4603c30fef73..0d9ce58bc464 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -243,8 +243,4 @@ arch/arm/mach-omap2/pm-asm-offsets.s: arch/arm/mach-omap2/pm-asm-offsets.c
243include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE 243include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE
244 $(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__) 244 $(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__)
245 245
246# For rule to generate ti-emif-asm-offsets.h dependency 246$(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h
247include drivers/memory/Makefile.asm-offsets
248
249arch/arm/mach-omap2/sleep33xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h
250arch/arm/mach-omap2/sleep43xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h
diff --git a/arch/arm/mach-omap2/pm-asm-offsets.c b/arch/arm/mach-omap2/pm-asm-offsets.c
index 6d4392da7c11..b9846b19e5e2 100644
--- a/arch/arm/mach-omap2/pm-asm-offsets.c
+++ b/arch/arm/mach-omap2/pm-asm-offsets.c
@@ -7,9 +7,12 @@
7 7
8#include <linux/kbuild.h> 8#include <linux/kbuild.h>
9#include <linux/platform_data/pm33xx.h> 9#include <linux/platform_data/pm33xx.h>
10#include <linux/ti-emif-sram.h>
10 11
11int main(void) 12int main(void)
12{ 13{
14 ti_emif_asm_offsets();
15
13 DEFINE(AMX3_PM_WFI_FLAGS_OFFSET, 16 DEFINE(AMX3_PM_WFI_FLAGS_OFFSET,
14 offsetof(struct am33xx_pm_sram_data, wfi_flags)); 17 offsetof(struct am33xx_pm_sram_data, wfi_flags));
15 DEFINE(AMX3_PM_L2_AUX_CTRL_VAL_OFFSET, 18 DEFINE(AMX3_PM_L2_AUX_CTRL_VAL_OFFSET,
diff --git a/arch/arm/mach-omap2/sleep33xx.S b/arch/arm/mach-omap2/sleep33xx.S
index 218d79930b04..322b3bb868b4 100644
--- a/arch/arm/mach-omap2/sleep33xx.S
+++ b/arch/arm/mach-omap2/sleep33xx.S
@@ -6,7 +6,6 @@
6 * Dave Gerlach, Vaibhav Bedia 6 * Dave Gerlach, Vaibhav Bedia
7 */ 7 */
8 8
9#include <generated/ti-emif-asm-offsets.h>
10#include <generated/ti-pm-asm-offsets.h> 9#include <generated/ti-pm-asm-offsets.h>
11#include <linux/linkage.h> 10#include <linux/linkage.h>
12#include <linux/ti-emif-sram.h> 11#include <linux/ti-emif-sram.h>
diff --git a/arch/arm/mach-omap2/sleep43xx.S b/arch/arm/mach-omap2/sleep43xx.S
index b24be624e8b9..8903814a6677 100644
--- a/arch/arm/mach-omap2/sleep43xx.S
+++ b/arch/arm/mach-omap2/sleep43xx.S
@@ -6,7 +6,6 @@
6 * Dave Gerlach, Vaibhav Bedia 6 * Dave Gerlach, Vaibhav Bedia
7 */ 7 */
8 8
9#include <generated/ti-emif-asm-offsets.h>
10#include <generated/ti-pm-asm-offsets.h> 9#include <generated/ti-pm-asm-offsets.h>
11#include <linux/linkage.h> 10#include <linux/linkage.h>
12#include <linux/ti-emif-sram.h> 11#include <linux/ti-emif-sram.h>
diff --git a/arch/arm/mach-s3c24xx/mach-jive.c b/arch/arm/mach-s3c24xx/mach-jive.c
index 59589a4a0d4b..885e8f12e4b9 100644
--- a/arch/arm/mach-s3c24xx/mach-jive.c
+++ b/arch/arm/mach-s3c24xx/mach-jive.c
@@ -427,9 +427,9 @@ static struct gpiod_lookup_table jive_wm8750_gpiod_table = {
427 .dev_id = "spi_gpio", 427 .dev_id = "spi_gpio",
428 .table = { 428 .table = {
429 GPIO_LOOKUP("GPIOB", 4, 429 GPIO_LOOKUP("GPIOB", 4,
430 "gpio-sck", GPIO_ACTIVE_HIGH), 430 "sck", GPIO_ACTIVE_HIGH),
431 GPIO_LOOKUP("GPIOB", 9, 431 GPIO_LOOKUP("GPIOB", 9,
432 "gpio-mosi", GPIO_ACTIVE_HIGH), 432 "mosi", GPIO_ACTIVE_HIGH),
433 GPIO_LOOKUP("GPIOH", 10, 433 GPIO_LOOKUP("GPIOH", 10,
434 "cs", GPIO_ACTIVE_HIGH), 434 "cs", GPIO_ACTIVE_HIGH),
435 { }, 435 { },
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index 4eef36b22538..88e712ea757a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -212,3 +212,7 @@
212 pinctrl-0 = <&uart_ao_a_pins>; 212 pinctrl-0 = <&uart_ao_a_pins>;
213 pinctrl-names = "default"; 213 pinctrl-names = "default";
214}; 214};
215
216&usb0 {
217 status = "okay";
218};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
index 22bf37404ff1..3e3eb31748a3 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
@@ -271,3 +271,15 @@
271 pinctrl-0 = <&uart_ao_a_pins>; 271 pinctrl-0 = <&uart_ao_a_pins>;
272 pinctrl-names = "default"; 272 pinctrl-names = "default";
273}; 273};
274
275&usb0 {
276 status = "okay";
277};
278
279&usb2_phy0 {
280 /*
281 * even though the schematics don't show it:
282 * HDMI_5V is also used as supply for the USB VBUS.
283 */
284 phy-supply = <&hdmi_5v>;
285};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
index 69c721a70e44..6739697be1de 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
@@ -215,3 +215,7 @@
215 pinctrl-0 = <&uart_ao_a_pins>; 215 pinctrl-0 = <&uart_ao_a_pins>;
216 pinctrl-names = "default"; 216 pinctrl-names = "default";
217}; 217};
218
219&usb0 {
220 status = "okay";
221};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
index 0a0953fbc7d4..0cfd701809de 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
@@ -185,3 +185,7 @@
185 pinctrl-0 = <&uart_ao_a_pins>; 185 pinctrl-0 = <&uart_ao_a_pins>;
186 pinctrl-names = "default"; 186 pinctrl-names = "default";
187}; 187};
188
189&usb0 {
190 status = "okay";
191};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index e1a39cbed8c9..dba365ed4bd5 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -20,6 +20,67 @@
20 no-map; 20 no-map;
21 }; 21 };
22 }; 22 };
23
24 soc {
25 usb0: usb@c9000000 {
26 status = "disabled";
27 compatible = "amlogic,meson-gxl-dwc3";
28 #address-cells = <2>;
29 #size-cells = <2>;
30 ranges;
31
32 clocks = <&clkc CLKID_USB>;
33 clock-names = "usb_general";
34 resets = <&reset RESET_USB_OTG>;
35 reset-names = "usb_otg";
36
37 dwc3: dwc3@c9000000 {
38 compatible = "snps,dwc3";
39 reg = <0x0 0xc9000000 0x0 0x100000>;
40 interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
41 dr_mode = "host";
42 maximum-speed = "high-speed";
43 snps,dis_u2_susphy_quirk;
44 phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>;
45 };
46 };
47 };
48};
49
50&apb {
51 usb2_phy0: phy@78000 {
52 compatible = "amlogic,meson-gxl-usb2-phy";
53 #phy-cells = <0>;
54 reg = <0x0 0x78000 0x0 0x20>;
55 clocks = <&clkc CLKID_USB>;
56 clock-names = "phy";
57 resets = <&reset RESET_USB_OTG>;
58 reset-names = "phy";
59 status = "okay";
60 };
61
62 usb2_phy1: phy@78020 {
63 compatible = "amlogic,meson-gxl-usb2-phy";
64 #phy-cells = <0>;
65 reg = <0x0 0x78020 0x0 0x20>;
66 clocks = <&clkc CLKID_USB>;
67 clock-names = "phy";
68 resets = <&reset RESET_USB_OTG>;
69 reset-names = "phy";
70 status = "okay";
71 };
72
73 usb3_phy: phy@78080 {
74 compatible = "amlogic,meson-gxl-usb3-phy";
75 #phy-cells = <0>;
76 reg = <0x0 0x78080 0x0 0x20>;
77 interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
78 clocks = <&clkc CLKID_USB>, <&clkc_AO CLKID_AO_CEC_32K>;
79 clock-names = "phy", "peripheral";
80 resets = <&reset RESET_USB_OTG>, <&reset RESET_USB_OTG>;
81 reset-names = "phy", "peripheral";
82 status = "okay";
83 };
23}; 84};
24 85
25&ethmac { 86&ethmac {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
index 4fd46c1546a7..0868da476e41 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
@@ -406,3 +406,7 @@
406 status = "okay"; 406 status = "okay";
407 vref-supply = <&vddio_ao18>; 407 vref-supply = <&vddio_ao18>;
408}; 408};
409
410&usb0 {
411 status = "okay";
412};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
index d076a7c425dd..247888d68a3a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
@@ -80,6 +80,19 @@
80 }; 80 };
81}; 81};
82 82
83&apb {
84 usb2_phy2: phy@78040 {
85 compatible = "amlogic,meson-gxl-usb2-phy";
86 #phy-cells = <0>;
87 reg = <0x0 0x78040 0x0 0x20>;
88 clocks = <&clkc CLKID_USB>;
89 clock-names = "phy";
90 resets = <&reset RESET_USB_OTG>;
91 reset-names = "phy";
92 status = "okay";
93 };
94};
95
83&clkc_AO { 96&clkc_AO {
84 compatible = "amlogic,meson-gxm-aoclkc", "amlogic,meson-gx-aoclkc"; 97 compatible = "amlogic,meson-gxm-aoclkc", "amlogic,meson-gx-aoclkc";
85}; 98};
@@ -100,3 +113,7 @@
100&hdmi_tx { 113&hdmi_tx {
101 compatible = "amlogic,meson-gxm-dw-hdmi", "amlogic,meson-gx-dw-hdmi"; 114 compatible = "amlogic,meson-gxm-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
102}; 115};
116
117&dwc3 {
118 phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>, <&usb2_phy2>;
119};
diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
index 2ac43221ddb6..69804c5f1197 100644
--- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
@@ -56,8 +56,6 @@
56 56
57 gpio_keys { 57 gpio_keys {
58 compatible = "gpio-keys"; 58 compatible = "gpio-keys";
59 #address-cells = <1>;
60 #size-cells = <0>;
61 59
62 power-button { 60 power-button {
63 debounce_interval = <50>; 61 debounce_interval = <50>;
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
index 4b5465da81d8..8c68e0c26f1b 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
@@ -36,11 +36,11 @@
36 #size-cells = <1>; 36 #size-cells = <1>;
37 ranges = <0x0 0x0 0x67d00000 0x00800000>; 37 ranges = <0x0 0x0 0x67d00000 0x00800000>;
38 38
39 sata0: ahci@210000 { 39 sata0: ahci@0 {
40 compatible = "brcm,iproc-ahci", "generic-ahci"; 40 compatible = "brcm,iproc-ahci", "generic-ahci";
41 reg = <0x00210000 0x1000>; 41 reg = <0x00000000 0x1000>;
42 reg-names = "ahci"; 42 reg-names = "ahci";
43 interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>; 43 interrupts = <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
44 #address-cells = <1>; 44 #address-cells = <1>;
45 #size-cells = <0>; 45 #size-cells = <0>;
46 status = "disabled"; 46 status = "disabled";
@@ -52,9 +52,9 @@
52 }; 52 };
53 }; 53 };
54 54
55 sata_phy0: sata_phy@212100 { 55 sata_phy0: sata_phy@2100 {
56 compatible = "brcm,iproc-sr-sata-phy"; 56 compatible = "brcm,iproc-sr-sata-phy";
57 reg = <0x00212100 0x1000>; 57 reg = <0x00002100 0x1000>;
58 reg-names = "phy"; 58 reg-names = "phy";
59 #address-cells = <1>; 59 #address-cells = <1>;
60 #size-cells = <0>; 60 #size-cells = <0>;
@@ -66,11 +66,11 @@
66 }; 66 };
67 }; 67 };
68 68
69 sata1: ahci@310000 { 69 sata1: ahci@10000 {
70 compatible = "brcm,iproc-ahci", "generic-ahci"; 70 compatible = "brcm,iproc-ahci", "generic-ahci";
71 reg = <0x00310000 0x1000>; 71 reg = <0x00010000 0x1000>;
72 reg-names = "ahci"; 72 reg-names = "ahci";
73 interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>; 73 interrupts = <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>;
74 #address-cells = <1>; 74 #address-cells = <1>;
75 #size-cells = <0>; 75 #size-cells = <0>;
76 status = "disabled"; 76 status = "disabled";
@@ -82,9 +82,9 @@
82 }; 82 };
83 }; 83 };
84 84
85 sata_phy1: sata_phy@312100 { 85 sata_phy1: sata_phy@12100 {
86 compatible = "brcm,iproc-sr-sata-phy"; 86 compatible = "brcm,iproc-sr-sata-phy";
87 reg = <0x00312100 0x1000>; 87 reg = <0x00012100 0x1000>;
88 reg-names = "phy"; 88 reg-names = "phy";
89 #address-cells = <1>; 89 #address-cells = <1>;
90 #size-cells = <0>; 90 #size-cells = <0>;
@@ -96,11 +96,11 @@
96 }; 96 };
97 }; 97 };
98 98
99 sata2: ahci@120000 { 99 sata2: ahci@20000 {
100 compatible = "brcm,iproc-ahci", "generic-ahci"; 100 compatible = "brcm,iproc-ahci", "generic-ahci";
101 reg = <0x00120000 0x1000>; 101 reg = <0x00020000 0x1000>;
102 reg-names = "ahci"; 102 reg-names = "ahci";
103 interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>; 103 interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>;
104 #address-cells = <1>; 104 #address-cells = <1>;
105 #size-cells = <0>; 105 #size-cells = <0>;
106 status = "disabled"; 106 status = "disabled";
@@ -112,9 +112,9 @@
112 }; 112 };
113 }; 113 };
114 114
115 sata_phy2: sata_phy@122100 { 115 sata_phy2: sata_phy@22100 {
116 compatible = "brcm,iproc-sr-sata-phy"; 116 compatible = "brcm,iproc-sr-sata-phy";
117 reg = <0x00122100 0x1000>; 117 reg = <0x00022100 0x1000>;
118 reg-names = "phy"; 118 reg-names = "phy";
119 #address-cells = <1>; 119 #address-cells = <1>;
120 #size-cells = <0>; 120 #size-cells = <0>;
@@ -126,11 +126,11 @@
126 }; 126 };
127 }; 127 };
128 128
129 sata3: ahci@130000 { 129 sata3: ahci@30000 {
130 compatible = "brcm,iproc-ahci", "generic-ahci"; 130 compatible = "brcm,iproc-ahci", "generic-ahci";
131 reg = <0x00130000 0x1000>; 131 reg = <0x00030000 0x1000>;
132 reg-names = "ahci"; 132 reg-names = "ahci";
133 interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>; 133 interrupts = <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>;
134 #address-cells = <1>; 134 #address-cells = <1>;
135 #size-cells = <0>; 135 #size-cells = <0>;
136 status = "disabled"; 136 status = "disabled";
@@ -142,9 +142,9 @@
142 }; 142 };
143 }; 143 };
144 144
145 sata_phy3: sata_phy@132100 { 145 sata_phy3: sata_phy@32100 {
146 compatible = "brcm,iproc-sr-sata-phy"; 146 compatible = "brcm,iproc-sr-sata-phy";
147 reg = <0x00132100 0x1000>; 147 reg = <0x00032100 0x1000>;
148 reg-names = "phy"; 148 reg-names = "phy";
149 #address-cells = <1>; 149 #address-cells = <1>;
150 #size-cells = <0>; 150 #size-cells = <0>;
@@ -156,11 +156,11 @@
156 }; 156 };
157 }; 157 };
158 158
159 sata4: ahci@330000 { 159 sata4: ahci@100000 {
160 compatible = "brcm,iproc-ahci", "generic-ahci"; 160 compatible = "brcm,iproc-ahci", "generic-ahci";
161 reg = <0x00330000 0x1000>; 161 reg = <0x00100000 0x1000>;
162 reg-names = "ahci"; 162 reg-names = "ahci";
163 interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>; 163 interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>;
164 #address-cells = <1>; 164 #address-cells = <1>;
165 #size-cells = <0>; 165 #size-cells = <0>;
166 status = "disabled"; 166 status = "disabled";
@@ -172,9 +172,9 @@
172 }; 172 };
173 }; 173 };
174 174
175 sata_phy4: sata_phy@332100 { 175 sata_phy4: sata_phy@102100 {
176 compatible = "brcm,iproc-sr-sata-phy"; 176 compatible = "brcm,iproc-sr-sata-phy";
177 reg = <0x00332100 0x1000>; 177 reg = <0x00102100 0x1000>;
178 reg-names = "phy"; 178 reg-names = "phy";
179 #address-cells = <1>; 179 #address-cells = <1>;
180 #size-cells = <0>; 180 #size-cells = <0>;
@@ -186,11 +186,11 @@
186 }; 186 };
187 }; 187 };
188 188
189 sata5: ahci@400000 { 189 sata5: ahci@110000 {
190 compatible = "brcm,iproc-ahci", "generic-ahci"; 190 compatible = "brcm,iproc-ahci", "generic-ahci";
191 reg = <0x00400000 0x1000>; 191 reg = <0x00110000 0x1000>;
192 reg-names = "ahci"; 192 reg-names = "ahci";
193 interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>; 193 interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>;
194 #address-cells = <1>; 194 #address-cells = <1>;
195 #size-cells = <0>; 195 #size-cells = <0>;
196 status = "disabled"; 196 status = "disabled";
@@ -202,9 +202,9 @@
202 }; 202 };
203 }; 203 };
204 204
205 sata_phy5: sata_phy@402100 { 205 sata_phy5: sata_phy@112100 {
206 compatible = "brcm,iproc-sr-sata-phy"; 206 compatible = "brcm,iproc-sr-sata-phy";
207 reg = <0x00402100 0x1000>; 207 reg = <0x00112100 0x1000>;
208 reg-names = "phy"; 208 reg-names = "phy";
209 #address-cells = <1>; 209 #address-cells = <1>;
210 #size-cells = <0>; 210 #size-cells = <0>;
@@ -216,11 +216,11 @@
216 }; 216 };
217 }; 217 };
218 218
219 sata6: ahci@410000 { 219 sata6: ahci@120000 {
220 compatible = "brcm,iproc-ahci", "generic-ahci"; 220 compatible = "brcm,iproc-ahci", "generic-ahci";
221 reg = <0x00410000 0x1000>; 221 reg = <0x00120000 0x1000>;
222 reg-names = "ahci"; 222 reg-names = "ahci";
223 interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>; 223 interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>;
224 #address-cells = <1>; 224 #address-cells = <1>;
225 #size-cells = <0>; 225 #size-cells = <0>;
226 status = "disabled"; 226 status = "disabled";
@@ -232,9 +232,9 @@
232 }; 232 };
233 }; 233 };
234 234
235 sata_phy6: sata_phy@412100 { 235 sata_phy6: sata_phy@122100 {
236 compatible = "brcm,iproc-sr-sata-phy"; 236 compatible = "brcm,iproc-sr-sata-phy";
237 reg = <0x00412100 0x1000>; 237 reg = <0x00122100 0x1000>;
238 reg-names = "phy"; 238 reg-names = "phy";
239 #address-cells = <1>; 239 #address-cells = <1>;
240 #size-cells = <0>; 240 #size-cells = <0>;
@@ -246,11 +246,11 @@
246 }; 246 };
247 }; 247 };
248 248
249 sata7: ahci@420000 { 249 sata7: ahci@130000 {
250 compatible = "brcm,iproc-ahci", "generic-ahci"; 250 compatible = "brcm,iproc-ahci", "generic-ahci";
251 reg = <0x00420000 0x1000>; 251 reg = <0x00130000 0x1000>;
252 reg-names = "ahci"; 252 reg-names = "ahci";
253 interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>; 253 interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
254 #address-cells = <1>; 254 #address-cells = <1>;
255 #size-cells = <0>; 255 #size-cells = <0>;
256 status = "disabled"; 256 status = "disabled";
@@ -262,9 +262,9 @@
262 }; 262 };
263 }; 263 };
264 264
265 sata_phy7: sata_phy@422100 { 265 sata_phy7: sata_phy@132100 {
266 compatible = "brcm,iproc-sr-sata-phy"; 266 compatible = "brcm,iproc-sr-sata-phy";
267 reg = <0x00422100 0x1000>; 267 reg = <0x00132100 0x1000>;
268 reg-names = "phy"; 268 reg-names = "phy";
269 #address-cells = <1>; 269 #address-cells = <1>;
270 #size-cells = <0>; 270 #size-cells = <0>;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 23d8acca5c90..cd4fd85fde84 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -11,6 +11,7 @@ config RISCV
11 select ARCH_WANT_FRAME_POINTERS 11 select ARCH_WANT_FRAME_POINTERS
12 select CLONE_BACKWARDS 12 select CLONE_BACKWARDS
13 select COMMON_CLK 13 select COMMON_CLK
14 select DMA_DIRECT_OPS
14 select GENERIC_CLOCKEVENTS 15 select GENERIC_CLOCKEVENTS
15 select GENERIC_CPU_DEVICES 16 select GENERIC_CPU_DEVICES
16 select GENERIC_IRQ_SHOW 17 select GENERIC_IRQ_SHOW
@@ -89,9 +90,6 @@ config PGTABLE_LEVELS
89config HAVE_KPROBES 90config HAVE_KPROBES
90 def_bool n 91 def_bool n
91 92
92config DMA_DIRECT_OPS
93 def_bool y
94
95menu "Platform type" 93menu "Platform type"
96 94
97choice 95choice
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 1e5fd280fb4d..4286a5f83876 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -15,7 +15,6 @@ generic-y += fcntl.h
15generic-y += futex.h 15generic-y += futex.h
16generic-y += hardirq.h 16generic-y += hardirq.h
17generic-y += hash.h 17generic-y += hash.h
18generic-y += handle_irq.h
19generic-y += hw_irq.h 18generic-y += hw_irq.h
20generic-y += ioctl.h 19generic-y += ioctl.h
21generic-y += ioctls.h 20generic-y += ioctls.h
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 324568d33921..f6561b783b61 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -52,7 +52,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
52# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions. 52# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
53# Make sure only to export the intended __vdso_xxx symbol offsets. 53# Make sure only to export the intended __vdso_xxx symbol offsets.
54quiet_cmd_vdsold = VDSOLD $@ 54quiet_cmd_vdsold = VDSOLD $@
55 cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \ 55 cmd_vdsold = $(CC) $(KCFLAGS) $(call cc-option, -no-pie) -nostdlib $(SYSCFLAGS_$(@F)) \
56 -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \ 56 -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
57 $(CROSS_COMPILE)objcopy \ 57 $(CROSS_COMPILE)objcopy \
58 $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ 58 $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 83ba57533ce6..3c883c368eb0 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -45,6 +45,9 @@ struct thread_info {
45void arch_release_task_struct(struct task_struct *tsk); 45void arch_release_task_struct(struct task_struct *tsk);
46int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); 46int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
47 47
48void arch_setup_new_exec(void);
49#define arch_setup_new_exec arch_setup_new_exec
50
48#endif 51#endif
49 52
50/* 53/*
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 5a83be955c70..0dc8ac8548ee 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -465,11 +465,11 @@ int module_finalize(const Elf_Ehdr *hdr,
465 apply_alternatives(aseg, aseg + s->sh_size); 465 apply_alternatives(aseg, aseg + s->sh_size);
466 466
467 if (IS_ENABLED(CONFIG_EXPOLINE) && 467 if (IS_ENABLED(CONFIG_EXPOLINE) &&
468 (!strcmp(".nospec_call_table", secname))) 468 (!strncmp(".s390_indirect", secname, 14)))
469 nospec_revert(aseg, aseg + s->sh_size); 469 nospec_revert(aseg, aseg + s->sh_size);
470 470
471 if (IS_ENABLED(CONFIG_EXPOLINE) && 471 if (IS_ENABLED(CONFIG_EXPOLINE) &&
472 (!strcmp(".nospec_return_table", secname))) 472 (!strncmp(".s390_return", secname, 12)))
473 nospec_revert(aseg, aseg + s->sh_size); 473 nospec_revert(aseg, aseg + s->sh_size);
474 } 474 }
475 475
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index 5ee27dc9a10c..feebb2944882 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -123,7 +123,7 @@ CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1);
123CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1); 123CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1);
124CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2); 124CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2);
125CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3); 125CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3);
126CPUMF_EVENT_ATTR(cf_z13, L1D_WRITES_RO_EXCL, 0x0080); 126CPUMF_EVENT_ATTR(cf_z13, L1D_RO_EXCL_WRITES, 0x0080);
127CPUMF_EVENT_ATTR(cf_z13, DTLB1_WRITES, 0x0081); 127CPUMF_EVENT_ATTR(cf_z13, DTLB1_WRITES, 0x0081);
128CPUMF_EVENT_ATTR(cf_z13, DTLB1_MISSES, 0x0082); 128CPUMF_EVENT_ATTR(cf_z13, DTLB1_MISSES, 0x0082);
129CPUMF_EVENT_ATTR(cf_z13, DTLB1_HPAGE_WRITES, 0x0083); 129CPUMF_EVENT_ATTR(cf_z13, DTLB1_HPAGE_WRITES, 0x0083);
@@ -179,7 +179,7 @@ CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_NO_SPECIAL, 0x00db);
179CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc); 179CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc);
180CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); 180CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
181CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); 181CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
182CPUMF_EVENT_ATTR(cf_z14, L1D_WRITES_RO_EXCL, 0x0080); 182CPUMF_EVENT_ATTR(cf_z14, L1D_RO_EXCL_WRITES, 0x0080);
183CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081); 183CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081);
184CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082); 184CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082);
185CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083); 185CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083);
@@ -371,7 +371,7 @@ static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = {
371}; 371};
372 372
373static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = { 373static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
374 CPUMF_EVENT_PTR(cf_z13, L1D_WRITES_RO_EXCL), 374 CPUMF_EVENT_PTR(cf_z13, L1D_RO_EXCL_WRITES),
375 CPUMF_EVENT_PTR(cf_z13, DTLB1_WRITES), 375 CPUMF_EVENT_PTR(cf_z13, DTLB1_WRITES),
376 CPUMF_EVENT_PTR(cf_z13, DTLB1_MISSES), 376 CPUMF_EVENT_PTR(cf_z13, DTLB1_MISSES),
377 CPUMF_EVENT_PTR(cf_z13, DTLB1_HPAGE_WRITES), 377 CPUMF_EVENT_PTR(cf_z13, DTLB1_HPAGE_WRITES),
@@ -431,7 +431,7 @@ static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
431}; 431};
432 432
433static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = { 433static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = {
434 CPUMF_EVENT_PTR(cf_z14, L1D_WRITES_RO_EXCL), 434 CPUMF_EVENT_PTR(cf_z14, L1D_RO_EXCL_WRITES),
435 CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES), 435 CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES),
436 CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES), 436 CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES),
437 CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES), 437 CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES),
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 70576a2f69cf..6e758bb6cd29 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -29,6 +29,7 @@
29#include <linux/random.h> 29#include <linux/random.h>
30#include <linux/export.h> 30#include <linux/export.h>
31#include <linux/init_task.h> 31#include <linux/init_task.h>
32#include <asm/cpu_mf.h>
32#include <asm/io.h> 33#include <asm/io.h>
33#include <asm/processor.h> 34#include <asm/processor.h>
34#include <asm/vtimer.h> 35#include <asm/vtimer.h>
@@ -48,6 +49,15 @@ void flush_thread(void)
48{ 49{
49} 50}
50 51
52void arch_setup_new_exec(void)
53{
54 if (S390_lowcore.current_pid != current->pid) {
55 S390_lowcore.current_pid = current->pid;
56 if (test_facility(40))
57 lpp(&S390_lowcore.lpp);
58 }
59}
60
51void arch_release_task_struct(struct task_struct *tsk) 61void arch_release_task_struct(struct task_struct *tsk)
52{ 62{
53 runtime_instr_release(tsk); 63 runtime_instr_release(tsk);
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index d9d1f512f019..5007fac01bb5 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -150,6 +150,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
150 return orig; 150 return orig;
151} 151}
152 152
153bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
154 struct pt_regs *regs)
155{
156 if (ctx == RP_CHECK_CHAIN_CALL)
157 return user_stack_pointer(regs) <= ret->stack;
158 else
159 return user_stack_pointer(regs) < ret->stack;
160}
161
153/* Instruction Emulation */ 162/* Instruction Emulation */
154 163
155static void adjust_psw_addr(psw_t *psw, unsigned long len) 164static void adjust_psw_addr(psw_t *psw, unsigned long len)
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 09ad88572746..cc8f8fcf9b4a 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -46,7 +46,21 @@ int ftrace_int3_handler(struct pt_regs *regs);
46#endif /* CONFIG_FUNCTION_TRACER */ 46#endif /* CONFIG_FUNCTION_TRACER */
47 47
48 48
49#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS) 49#ifndef __ASSEMBLY__
50
51#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
52static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
53{
54 /*
55 * Compare the symbol name with the system call name. Skip the
56 * "__x64_sys", "__ia32_sys" or simple "sys" prefix.
57 */
58 return !strcmp(sym + 3, name + 3) ||
59 (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) ||
60 (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3));
61}
62
63#ifndef COMPILE_OFFSETS
50 64
51#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION) 65#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
52#include <asm/compat.h> 66#include <asm/compat.h>
@@ -67,6 +81,7 @@ static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
67 return false; 81 return false;
68} 82}
69#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */ 83#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
70#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */ 84#endif /* !COMPILE_OFFSETS */
85#endif /* !__ASSEMBLY__ */
71 86
72#endif /* _ASM_X86_FTRACE_H */ 87#endif /* _ASM_X86_FTRACE_H */
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index f0ecd98509d8..771ae9730ac6 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4934,8 +4934,16 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
4934 bool new_queue = false; 4934 bool new_queue = false;
4935 bool bfqq_already_existing = false, split = false; 4935 bool bfqq_already_existing = false, split = false;
4936 4936
4937 if (!rq->elv.icq) 4937 /*
4938 * Even if we don't have an icq attached, we should still clear
4939 * the scheduler pointers, as they might point to previously
4940 * allocated bic/bfqq structs.
4941 */
4942 if (!rq->elv.icq) {
4943 rq->elv.priv[0] = rq->elv.priv[1] = NULL;
4938 return; 4944 return;
4945 }
4946
4939 bic = icq_to_bic(rq->elv.icq); 4947 bic = icq_to_bic(rq->elv.icq);
4940 4948
4941 spin_lock_irq(&bfqd->lock); 4949 spin_lock_irq(&bfqd->lock);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 1c16694ae145..eb85cb87c40f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1177,26 +1177,20 @@ int blkcg_init_queue(struct request_queue *q)
1177 1177
1178 preloaded = !radix_tree_preload(GFP_KERNEL); 1178 preloaded = !radix_tree_preload(GFP_KERNEL);
1179 1179
1180 /* 1180 /* Make sure the root blkg exists. */
1181 * Make sure the root blkg exists and count the existing blkgs. As
1182 * @q is bypassing at this point, blkg_lookup_create() can't be
1183 * used. Open code insertion.
1184 */
1185 rcu_read_lock(); 1181 rcu_read_lock();
1186 spin_lock_irq(q->queue_lock); 1182 spin_lock_irq(q->queue_lock);
1187 blkg = blkg_create(&blkcg_root, q, new_blkg); 1183 blkg = blkg_create(&blkcg_root, q, new_blkg);
1184 if (IS_ERR(blkg))
1185 goto err_unlock;
1186 q->root_blkg = blkg;
1187 q->root_rl.blkg = blkg;
1188 spin_unlock_irq(q->queue_lock); 1188 spin_unlock_irq(q->queue_lock);
1189 rcu_read_unlock(); 1189 rcu_read_unlock();
1190 1190
1191 if (preloaded) 1191 if (preloaded)
1192 radix_tree_preload_end(); 1192 radix_tree_preload_end();
1193 1193
1194 if (IS_ERR(blkg))
1195 return PTR_ERR(blkg);
1196
1197 q->root_blkg = blkg;
1198 q->root_rl.blkg = blkg;
1199
1200 ret = blk_throtl_init(q); 1194 ret = blk_throtl_init(q);
1201 if (ret) { 1195 if (ret) {
1202 spin_lock_irq(q->queue_lock); 1196 spin_lock_irq(q->queue_lock);
@@ -1204,6 +1198,13 @@ int blkcg_init_queue(struct request_queue *q)
1204 spin_unlock_irq(q->queue_lock); 1198 spin_unlock_irq(q->queue_lock);
1205 } 1199 }
1206 return ret; 1200 return ret;
1201
1202err_unlock:
1203 spin_unlock_irq(q->queue_lock);
1204 rcu_read_unlock();
1205 if (preloaded)
1206 radix_tree_preload_end();
1207 return PTR_ERR(blkg);
1207} 1208}
1208 1209
1209/** 1210/**
@@ -1410,9 +1411,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
1410 __clear_bit(pol->plid, q->blkcg_pols); 1411 __clear_bit(pol->plid, q->blkcg_pols);
1411 1412
1412 list_for_each_entry(blkg, &q->blkg_list, q_node) { 1413 list_for_each_entry(blkg, &q->blkg_list, q_node) {
1413 /* grab blkcg lock too while removing @pd from @blkg */
1414 spin_lock(&blkg->blkcg->lock);
1415
1416 if (blkg->pd[pol->plid]) { 1414 if (blkg->pd[pol->plid]) {
1417 if (!blkg->pd[pol->plid]->offline && 1415 if (!blkg->pd[pol->plid]->offline &&
1418 pol->pd_offline_fn) { 1416 pol->pd_offline_fn) {
@@ -1422,8 +1420,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
1422 pol->pd_free_fn(blkg->pd[pol->plid]); 1420 pol->pd_free_fn(blkg->pd[pol->plid]);
1423 blkg->pd[pol->plid] = NULL; 1421 blkg->pd[pol->plid] = NULL;
1424 } 1422 }
1425
1426 spin_unlock(&blkg->blkcg->lock);
1427 } 1423 }
1428 1424
1429 spin_unlock_irq(q->queue_lock); 1425 spin_unlock_irq(q->queue_lock);
diff --git a/block/blk-core.c b/block/blk-core.c
index 806ce2442819..85909b431eb0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -201,6 +201,10 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
201 rq->part = NULL; 201 rq->part = NULL;
202 seqcount_init(&rq->gstate_seq); 202 seqcount_init(&rq->gstate_seq);
203 u64_stats_init(&rq->aborted_gstate_sync); 203 u64_stats_init(&rq->aborted_gstate_sync);
204 /*
205 * See comment of blk_mq_init_request
206 */
207 WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
204} 208}
205EXPORT_SYMBOL(blk_rq_init); 209EXPORT_SYMBOL(blk_rq_init);
206 210
@@ -915,7 +919,6 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
915 919
916 while (true) { 920 while (true) {
917 bool success = false; 921 bool success = false;
918 int ret;
919 922
920 rcu_read_lock(); 923 rcu_read_lock();
921 if (percpu_ref_tryget_live(&q->q_usage_counter)) { 924 if (percpu_ref_tryget_live(&q->q_usage_counter)) {
@@ -947,14 +950,12 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
947 */ 950 */
948 smp_rmb(); 951 smp_rmb();
949 952
950 ret = wait_event_interruptible(q->mq_freeze_wq, 953 wait_event(q->mq_freeze_wq,
951 (atomic_read(&q->mq_freeze_depth) == 0 && 954 (atomic_read(&q->mq_freeze_depth) == 0 &&
952 (preempt || !blk_queue_preempt_only(q))) || 955 (preempt || !blk_queue_preempt_only(q))) ||
953 blk_queue_dying(q)); 956 blk_queue_dying(q));
954 if (blk_queue_dying(q)) 957 if (blk_queue_dying(q))
955 return -ENODEV; 958 return -ENODEV;
956 if (ret)
957 return ret;
958 } 959 }
959} 960}
960 961
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0dc9e341c2a7..c3621453ad87 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2042,6 +2042,13 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2042 2042
2043 seqcount_init(&rq->gstate_seq); 2043 seqcount_init(&rq->gstate_seq);
2044 u64_stats_init(&rq->aborted_gstate_sync); 2044 u64_stats_init(&rq->aborted_gstate_sync);
2045 /*
2046 * start gstate with gen 1 instead of 0, otherwise it will be equal
2047 * to aborted_gstate, and be identified timed out by
2048 * blk_mq_terminate_expired.
2049 */
2050 WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
2051
2045 return 0; 2052 return 0;
2046} 2053}
2047 2054
@@ -2329,7 +2336,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2329 2336
2330static void blk_mq_map_swqueue(struct request_queue *q) 2337static void blk_mq_map_swqueue(struct request_queue *q)
2331{ 2338{
2332 unsigned int i; 2339 unsigned int i, hctx_idx;
2333 struct blk_mq_hw_ctx *hctx; 2340 struct blk_mq_hw_ctx *hctx;
2334 struct blk_mq_ctx *ctx; 2341 struct blk_mq_ctx *ctx;
2335 struct blk_mq_tag_set *set = q->tag_set; 2342 struct blk_mq_tag_set *set = q->tag_set;
@@ -2346,8 +2353,23 @@ static void blk_mq_map_swqueue(struct request_queue *q)
2346 2353
2347 /* 2354 /*
2348 * Map software to hardware queues. 2355 * Map software to hardware queues.
2356 *
2357 * If the cpu isn't present, the cpu is mapped to first hctx.
2349 */ 2358 */
2350 for_each_possible_cpu(i) { 2359 for_each_possible_cpu(i) {
2360 hctx_idx = q->mq_map[i];
2361 /* unmapped hw queue can be remapped after CPU topo changed */
2362 if (!set->tags[hctx_idx] &&
2363 !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2364 /*
2365 * If tags initialization fail for some hctx,
2366 * that hctx won't be brought online. In this
2367 * case, remap the current ctx to hctx[0] which
2368 * is guaranteed to always have tags allocated
2369 */
2370 q->mq_map[i] = 0;
2371 }
2372
2351 ctx = per_cpu_ptr(q->queue_ctx, i); 2373 ctx = per_cpu_ptr(q->queue_ctx, i);
2352 hctx = blk_mq_map_queue(q, i); 2374 hctx = blk_mq_map_queue(q, i);
2353 2375
@@ -2359,8 +2381,21 @@ static void blk_mq_map_swqueue(struct request_queue *q)
2359 mutex_unlock(&q->sysfs_lock); 2381 mutex_unlock(&q->sysfs_lock);
2360 2382
2361 queue_for_each_hw_ctx(q, hctx, i) { 2383 queue_for_each_hw_ctx(q, hctx, i) {
2362 /* every hctx should get mapped by at least one CPU */ 2384 /*
2363 WARN_ON(!hctx->nr_ctx); 2385 * If no software queues are mapped to this hardware queue,
2386 * disable it and free the request entries.
2387 */
2388 if (!hctx->nr_ctx) {
2389 /* Never unmap queue 0. We need it as a
2390 * fallback in case of a new remap fails
2391 * allocation
2392 */
2393 if (i && set->tags[i])
2394 blk_mq_free_map_and_requests(set, i);
2395
2396 hctx->tags = NULL;
2397 continue;
2398 }
2364 2399
2365 hctx->tags = set->tags[i]; 2400 hctx->tags = set->tags[i];
2366 WARN_ON(!hctx->tags); 2401 WARN_ON(!hctx->tags);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 88c558f71819..89b5cd3a6c70 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -7,6 +7,9 @@
7 7
8struct blk_mq_tag_set; 8struct blk_mq_tag_set;
9 9
10/**
11 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
12 */
10struct blk_mq_ctx { 13struct blk_mq_ctx {
11 struct { 14 struct {
12 spinlock_t lock; 15 spinlock_t lock;
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 76fb96966f7b..2f2e737be0f8 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void)
2123 return opregion; 2123 return opregion;
2124} 2124}
2125 2125
2126static bool dmi_is_desktop(void)
2127{
2128 const char *chassis_type;
2129
2130 chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
2131 if (!chassis_type)
2132 return false;
2133
2134 if (!strcmp(chassis_type, "3") || /* 3: Desktop */
2135 !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
2136 !strcmp(chassis_type, "5") || /* 5: Pizza Box */
2137 !strcmp(chassis_type, "6") || /* 6: Mini Tower */
2138 !strcmp(chassis_type, "7") || /* 7: Tower */
2139 !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
2140 return true;
2141
2142 return false;
2143}
2144
2126int acpi_video_register(void) 2145int acpi_video_register(void)
2127{ 2146{
2128 int ret = 0; 2147 int ret = 0;
@@ -2143,8 +2162,12 @@ int acpi_video_register(void)
2143 * win8 ready (where we also prefer the native backlight driver, so 2162 * win8 ready (where we also prefer the native backlight driver, so
2144 * normally the acpi_video code should not register there anyways). 2163 * normally the acpi_video code should not register there anyways).
2145 */ 2164 */
2146 if (only_lcd == -1) 2165 if (only_lcd == -1) {
2147 only_lcd = acpi_osi_is_win8(); 2166 if (dmi_is_desktop() && acpi_osi_is_win8())
2167 only_lcd = true;
2168 else
2169 only_lcd = false;
2170 }
2148 2171
2149 dmi_check_system(video_dmi_table); 2172 dmi_check_system(video_dmi_table);
2150 2173
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index ebb626ffb5fa..4bde16fb97d8 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -12,23 +12,64 @@
12#define pr_fmt(fmt) "ACPI: watchdog: " fmt 12#define pr_fmt(fmt) "ACPI: watchdog: " fmt
13 13
14#include <linux/acpi.h> 14#include <linux/acpi.h>
15#include <linux/dmi.h>
15#include <linux/ioport.h> 16#include <linux/ioport.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
17 18
18#include "internal.h" 19#include "internal.h"
19 20
21static const struct dmi_system_id acpi_watchdog_skip[] = {
22 {
23 /*
24 * On Lenovo Z50-70 there are two issues with the WDAT
25 * table. First some of the instructions use RTC SRAM
26 * to store persistent information. This does not work well
27 * with Linux RTC driver. Second, more important thing is
28 * that the instructions do not actually reset the system.
29 *
30 * On this particular system iTCO_wdt seems to work just
31 * fine so we prefer that over WDAT for now.
32 *
33 * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
34 */
35 .ident = "Lenovo Z50-70",
36 .matches = {
37 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
38 DMI_MATCH(DMI_PRODUCT_NAME, "20354"),
39 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"),
40 },
41 },
42 {}
43};
44
45static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
46{
47 const struct acpi_table_wdat *wdat = NULL;
48 acpi_status status;
49
50 if (acpi_disabled)
51 return NULL;
52
53 if (dmi_check_system(acpi_watchdog_skip))
54 return NULL;
55
56 status = acpi_get_table(ACPI_SIG_WDAT, 0,
57 (struct acpi_table_header **)&wdat);
58 if (ACPI_FAILURE(status)) {
59 /* It is fine if there is no WDAT */
60 return NULL;
61 }
62
63 return wdat;
64}
65
20/** 66/**
21 * Returns true if this system should prefer ACPI based watchdog instead of 67 * Returns true if this system should prefer ACPI based watchdog instead of
22 * the native one (which are typically the same hardware). 68 * the native one (which are typically the same hardware).
23 */ 69 */
24bool acpi_has_watchdog(void) 70bool acpi_has_watchdog(void)
25{ 71{
26 struct acpi_table_header hdr; 72 return !!acpi_watchdog_get_wdat();
27
28 if (acpi_disabled)
29 return false;
30
31 return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr));
32} 73}
33EXPORT_SYMBOL_GPL(acpi_has_watchdog); 74EXPORT_SYMBOL_GPL(acpi_has_watchdog);
34 75
@@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void)
41 struct platform_device *pdev; 82 struct platform_device *pdev;
42 struct resource *resources; 83 struct resource *resources;
43 size_t nresources = 0; 84 size_t nresources = 0;
44 acpi_status status;
45 int i; 85 int i;
46 86
47 status = acpi_get_table(ACPI_SIG_WDAT, 0, 87 wdat = acpi_watchdog_get_wdat();
48 (struct acpi_table_header **)&wdat); 88 if (!wdat) {
49 if (ACPI_FAILURE(status)) {
50 /* It is fine if there is no WDAT */ 89 /* It is fine if there is no WDAT */
51 return; 90 return;
52 } 91 }
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index e1eee7a60fad..f1cc4f9d31cd 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -635,4 +635,26 @@ module_param_call(lid_init_state,
635 NULL, 0644); 635 NULL, 0644);
636MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state"); 636MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
637 637
638module_acpi_driver(acpi_button_driver); 638static int acpi_button_register_driver(struct acpi_driver *driver)
639{
640 /*
641 * Modules such as nouveau.ko and i915.ko have a link time dependency
642 * on acpi_lid_open(), and would therefore not be loadable on ACPI
643 * capable kernels booted in non-ACPI mode if the return value of
644 * acpi_bus_register_driver() is returned from here with ACPI disabled
645 * when this driver is built as a module.
646 */
647 if (acpi_disabled)
648 return 0;
649
650 return acpi_bus_register_driver(driver);
651}
652
653static void acpi_button_unregister_driver(struct acpi_driver *driver)
654{
655 if (!acpi_disabled)
656 acpi_bus_unregister_driver(driver);
657}
658
659module_driver(acpi_button_driver, acpi_button_register_driver,
660 acpi_button_unregister_driver);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index cc234e6a6297..970dd87d347c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2166,10 +2166,10 @@ int __init acpi_scan_init(void)
2166 acpi_cmos_rtc_init(); 2166 acpi_cmos_rtc_init();
2167 acpi_container_init(); 2167 acpi_container_init();
2168 acpi_memory_hotplug_init(); 2168 acpi_memory_hotplug_init();
2169 acpi_watchdog_init();
2169 acpi_pnp_init(); 2170 acpi_pnp_init();
2170 acpi_int340x_thermal_init(); 2171 acpi_int340x_thermal_init();
2171 acpi_amba_init(); 2172 acpi_amba_init();
2172 acpi_watchdog_init();
2173 acpi_init_lpit(); 2173 acpi_init_lpit();
2174 2174
2175 acpi_scan_add_handler(&generic_device_handler); 2175 acpi_scan_add_handler(&generic_device_handler);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 99a1a650326d..974e58457697 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -364,6 +364,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
364 DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), 364 DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
365 }, 365 },
366 }, 366 },
367 /*
368 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
369 * the Low Power S0 Idle firmware interface (see
370 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
371 */
372 {
373 .callback = init_no_lps0,
374 .ident = "ThinkPad X1 Tablet(2016)",
375 .matches = {
376 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
377 DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
378 },
379 },
367 {}, 380 {},
368}; 381};
369 382
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 1e6396bb807b..597d40893862 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -312,8 +312,9 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
312 * This checks whether the memory was allocated from the per-device 312 * This checks whether the memory was allocated from the per-device
313 * coherent memory pool and if so, maps that memory to the provided vma. 313 * coherent memory pool and if so, maps that memory to the provided vma.
314 * 314 *
315 * Returns 1 if we correctly mapped the memory, or 0 if the caller should 315 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
316 * proceed with mapping memory from generic pools. 316 * should return @ret, or 0 if they should proceed with mapping memory from
317 * generic areas.
317 */ 318 */
318int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, 319int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
319 void *vaddr, size_t size, int *ret) 320 void *vaddr, size_t size, int *ret)
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 3b118353ea17..d82566d6e237 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -226,7 +226,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
226#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP 226#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
227 unsigned long user_count = vma_pages(vma); 227 unsigned long user_count = vma_pages(vma);
228 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; 228 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
229 unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
230 unsigned long off = vma->vm_pgoff; 229 unsigned long off = vma->vm_pgoff;
231 230
232 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 231 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -234,12 +233,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
234 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 233 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
235 return ret; 234 return ret;
236 235
237 if (off < count && user_count <= (count - off)) { 236 if (off < count && user_count <= (count - off))
238 ret = remap_pfn_range(vma, vma->vm_start, 237 ret = remap_pfn_range(vma, vma->vm_start,
239 pfn + off, 238 page_to_pfn(virt_to_page(cpu_addr)) + off,
240 user_count << PAGE_SHIFT, 239 user_count << PAGE_SHIFT,
241 vma->vm_page_prot); 240 vma->vm_page_prot);
242 }
243#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ 241#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
244 242
245 return ret; 243 return ret;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c9d04497a415..5d4e31655d96 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -451,25 +451,47 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq)
451static void lo_complete_rq(struct request *rq) 451static void lo_complete_rq(struct request *rq)
452{ 452{
453 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); 453 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
454 blk_status_t ret = BLK_STS_OK;
454 455
455 if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio && 456 if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
456 cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) { 457 req_op(rq) != REQ_OP_READ) {
457 struct bio *bio = cmd->rq->bio; 458 if (cmd->ret < 0)
458 459 ret = BLK_STS_IOERR;
459 bio_advance(bio, cmd->ret); 460 goto end_io;
460 zero_fill_bio(bio);
461 } 461 }
462 462
463 blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK); 463 /*
464 * Short READ - if we got some data, advance our request and
465 * retry it. If we got no data, end the rest with EIO.
466 */
467 if (cmd->ret) {
468 blk_update_request(rq, BLK_STS_OK, cmd->ret);
469 cmd->ret = 0;
470 blk_mq_requeue_request(rq, true);
471 } else {
472 if (cmd->use_aio) {
473 struct bio *bio = rq->bio;
474
475 while (bio) {
476 zero_fill_bio(bio);
477 bio = bio->bi_next;
478 }
479 }
480 ret = BLK_STS_IOERR;
481end_io:
482 blk_mq_end_request(rq, ret);
483 }
464} 484}
465 485
466static void lo_rw_aio_do_completion(struct loop_cmd *cmd) 486static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
467{ 487{
488 struct request *rq = blk_mq_rq_from_pdu(cmd);
489
468 if (!atomic_dec_and_test(&cmd->ref)) 490 if (!atomic_dec_and_test(&cmd->ref))
469 return; 491 return;
470 kfree(cmd->bvec); 492 kfree(cmd->bvec);
471 cmd->bvec = NULL; 493 cmd->bvec = NULL;
472 blk_mq_complete_request(cmd->rq); 494 blk_mq_complete_request(rq);
473} 495}
474 496
475static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) 497static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -487,7 +509,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
487{ 509{
488 struct iov_iter iter; 510 struct iov_iter iter;
489 struct bio_vec *bvec; 511 struct bio_vec *bvec;
490 struct request *rq = cmd->rq; 512 struct request *rq = blk_mq_rq_from_pdu(cmd);
491 struct bio *bio = rq->bio; 513 struct bio *bio = rq->bio;
492 struct file *file = lo->lo_backing_file; 514 struct file *file = lo->lo_backing_file;
493 unsigned int offset; 515 unsigned int offset;
@@ -1702,15 +1724,16 @@ EXPORT_SYMBOL(loop_unregister_transfer);
1702static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, 1724static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1703 const struct blk_mq_queue_data *bd) 1725 const struct blk_mq_queue_data *bd)
1704{ 1726{
1705 struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 1727 struct request *rq = bd->rq;
1706 struct loop_device *lo = cmd->rq->q->queuedata; 1728 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
1729 struct loop_device *lo = rq->q->queuedata;
1707 1730
1708 blk_mq_start_request(bd->rq); 1731 blk_mq_start_request(rq);
1709 1732
1710 if (lo->lo_state != Lo_bound) 1733 if (lo->lo_state != Lo_bound)
1711 return BLK_STS_IOERR; 1734 return BLK_STS_IOERR;
1712 1735
1713 switch (req_op(cmd->rq)) { 1736 switch (req_op(rq)) {
1714 case REQ_OP_FLUSH: 1737 case REQ_OP_FLUSH:
1715 case REQ_OP_DISCARD: 1738 case REQ_OP_DISCARD:
1716 case REQ_OP_WRITE_ZEROES: 1739 case REQ_OP_WRITE_ZEROES:
@@ -1723,8 +1746,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1723 1746
1724 /* always use the first bio's css */ 1747 /* always use the first bio's css */
1725#ifdef CONFIG_BLK_CGROUP 1748#ifdef CONFIG_BLK_CGROUP
1726 if (cmd->use_aio && cmd->rq->bio && cmd->rq->bio->bi_css) { 1749 if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
1727 cmd->css = cmd->rq->bio->bi_css; 1750 cmd->css = rq->bio->bi_css;
1728 css_get(cmd->css); 1751 css_get(cmd->css);
1729 } else 1752 } else
1730#endif 1753#endif
@@ -1736,8 +1759,9 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1736 1759
1737static void loop_handle_cmd(struct loop_cmd *cmd) 1760static void loop_handle_cmd(struct loop_cmd *cmd)
1738{ 1761{
1739 const bool write = op_is_write(req_op(cmd->rq)); 1762 struct request *rq = blk_mq_rq_from_pdu(cmd);
1740 struct loop_device *lo = cmd->rq->q->queuedata; 1763 const bool write = op_is_write(req_op(rq));
1764 struct loop_device *lo = rq->q->queuedata;
1741 int ret = 0; 1765 int ret = 0;
1742 1766
1743 if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { 1767 if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
@@ -1745,12 +1769,12 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
1745 goto failed; 1769 goto failed;
1746 } 1770 }
1747 1771
1748 ret = do_req_filebacked(lo, cmd->rq); 1772 ret = do_req_filebacked(lo, rq);
1749 failed: 1773 failed:
1750 /* complete non-aio request */ 1774 /* complete non-aio request */
1751 if (!cmd->use_aio || ret) { 1775 if (!cmd->use_aio || ret) {
1752 cmd->ret = ret ? -EIO : 0; 1776 cmd->ret = ret ? -EIO : 0;
1753 blk_mq_complete_request(cmd->rq); 1777 blk_mq_complete_request(rq);
1754 } 1778 }
1755} 1779}
1756 1780
@@ -1767,9 +1791,7 @@ static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
1767{ 1791{
1768 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); 1792 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
1769 1793
1770 cmd->rq = rq;
1771 kthread_init_work(&cmd->work, loop_queue_work); 1794 kthread_init_work(&cmd->work, loop_queue_work);
1772
1773 return 0; 1795 return 0;
1774} 1796}
1775 1797
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 0f45416e4fcf..b78de9879f4f 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -66,7 +66,6 @@ struct loop_device {
66 66
67struct loop_cmd { 67struct loop_cmd {
68 struct kthread_work work; 68 struct kthread_work work;
69 struct request *rq;
70 bool use_aio; /* use AIO interface to handle I/O */ 69 bool use_aio; /* use AIO interface to handle I/O */
71 atomic_t ref; /* only for aio */ 70 atomic_t ref; /* only for aio */
72 long ret; 71 long ret;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 64e066eba72e..0e31884a9519 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -110,7 +110,7 @@ struct iwm {
110/* Select values for swim_select and swim_readbit */ 110/* Select values for swim_select and swim_readbit */
111 111
112#define READ_DATA_0 0x074 112#define READ_DATA_0 0x074
113#define TWOMEG_DRIVE 0x075 113#define ONEMEG_DRIVE 0x075
114#define SINGLE_SIDED 0x076 114#define SINGLE_SIDED 0x076
115#define DRIVE_PRESENT 0x077 115#define DRIVE_PRESENT 0x077
116#define DISK_IN 0x170 116#define DISK_IN 0x170
@@ -118,9 +118,9 @@ struct iwm {
118#define TRACK_ZERO 0x172 118#define TRACK_ZERO 0x172
119#define TACHO 0x173 119#define TACHO 0x173
120#define READ_DATA_1 0x174 120#define READ_DATA_1 0x174
121#define MFM_MODE 0x175 121#define GCR_MODE 0x175
122#define SEEK_COMPLETE 0x176 122#define SEEK_COMPLETE 0x176
123#define ONEMEG_MEDIA 0x177 123#define TWOMEG_MEDIA 0x177
124 124
125/* Bits in handshake register */ 125/* Bits in handshake register */
126 126
@@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs)
612 struct floppy_struct *g; 612 struct floppy_struct *g;
613 fs->disk_in = 1; 613 fs->disk_in = 1;
614 fs->write_protected = swim_readbit(base, WRITE_PROT); 614 fs->write_protected = swim_readbit(base, WRITE_PROT);
615 fs->type = swim_readbit(base, ONEMEG_MEDIA);
616 615
617 if (swim_track00(base)) 616 if (swim_track00(base))
618 printk(KERN_ERR 617 printk(KERN_ERR
@@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs)
620 619
621 swim_track00(base); 620 swim_track00(base);
622 621
622 fs->type = swim_readbit(base, TWOMEG_MEDIA) ?
623 HD_MEDIA : DD_MEDIA;
624 fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2;
623 get_floppy_geometry(fs, 0, &g); 625 get_floppy_geometry(fs, 0, &g);
624 fs->total_secs = g->size; 626 fs->total_secs = g->size;
625 fs->secpercyl = g->head * g->sect; 627 fs->secpercyl = g->head * g->sect;
@@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
646 648
647 swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2); 649 swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2);
648 udelay(10); 650 udelay(10);
649 swim_drive(base, INTERNAL_DRIVE); 651 swim_drive(base, fs->location);
650 swim_motor(base, ON); 652 swim_motor(base, ON);
651 swim_action(base, SETMFM); 653 swim_action(base, SETMFM);
652 if (fs->ejected) 654 if (fs->ejected)
@@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
656 goto out; 658 goto out;
657 } 659 }
658 660
661 set_capacity(fs->disk, fs->total_secs);
662
659 if (mode & FMODE_NDELAY) 663 if (mode & FMODE_NDELAY)
660 return 0; 664 return 0;
661 665
@@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
727 if (copy_to_user((void __user *) param, (void *) &floppy_type, 731 if (copy_to_user((void __user *) param, (void *) &floppy_type,
728 sizeof(struct floppy_struct))) 732 sizeof(struct floppy_struct)))
729 return -EFAULT; 733 return -EFAULT;
730 break; 734 return 0;
731
732 default:
733 printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n",
734 cmd);
735 return -ENOSYS;
736 } 735 }
737 return 0; 736 return -ENOTTY;
738} 737}
739 738
740static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo) 739static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
795 struct swim_priv *swd = data; 794 struct swim_priv *swd = data;
796 int drive = (*part & 3); 795 int drive = (*part & 3);
797 796
798 if (drive > swd->floppy_count) 797 if (drive >= swd->floppy_count)
799 return NULL; 798 return NULL;
800 799
801 *part = 0; 800 *part = 0;
@@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
813 812
814 swim_motor(base, OFF); 813 swim_motor(base, OFF);
815 814
816 if (swim_readbit(base, SINGLE_SIDED)) 815 fs->type = HD_MEDIA;
817 fs->head_number = 1; 816 fs->head_number = 2;
818 else 817
819 fs->head_number = 2;
820 fs->ref_count = 0; 818 fs->ref_count = 0;
821 fs->ejected = 1; 819 fs->ejected = 1;
822 820
@@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd)
834 /* scan floppy drives */ 832 /* scan floppy drives */
835 833
836 swim_drive(base, INTERNAL_DRIVE); 834 swim_drive(base, INTERNAL_DRIVE);
837 if (swim_readbit(base, DRIVE_PRESENT)) 835 if (swim_readbit(base, DRIVE_PRESENT) &&
836 !swim_readbit(base, ONEMEG_DRIVE))
838 swim_add_floppy(swd, INTERNAL_DRIVE); 837 swim_add_floppy(swd, INTERNAL_DRIVE);
839 swim_drive(base, EXTERNAL_DRIVE); 838 swim_drive(base, EXTERNAL_DRIVE);
840 if (swim_readbit(base, DRIVE_PRESENT)) 839 if (swim_readbit(base, DRIVE_PRESENT) &&
840 !swim_readbit(base, ONEMEG_DRIVE))
841 swim_add_floppy(swd, EXTERNAL_DRIVE); 841 swim_add_floppy(swd, EXTERNAL_DRIVE);
842 842
843 /* register floppy drives */ 843 /* register floppy drives */
@@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd)
861 &swd->lock); 861 &swd->lock);
862 if (!swd->unit[drive].disk->queue) { 862 if (!swd->unit[drive].disk->queue) {
863 err = -ENOMEM; 863 err = -ENOMEM;
864 put_disk(swd->unit[drive].disk);
865 goto exit_put_disks; 864 goto exit_put_disks;
866 } 865 }
867 blk_queue_bounce_limit(swd->unit[drive].disk->queue, 866 blk_queue_bounce_limit(swd->unit[drive].disk->queue,
@@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev)
911 goto out; 910 goto out;
912 } 911 }
913 912
914 swim_base = ioremap(res->start, resource_size(res)); 913 swim_base = (struct swim __iomem *)res->start;
915 if (!swim_base) { 914 if (!swim_base) {
916 ret = -ENOMEM; 915 ret = -ENOMEM;
917 goto out_release_io; 916 goto out_release_io;
@@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev)
923 if (!get_swim_mode(swim_base)) { 922 if (!get_swim_mode(swim_base)) {
924 printk(KERN_INFO "SWIM device not found !\n"); 923 printk(KERN_INFO "SWIM device not found !\n");
925 ret = -ENODEV; 924 ret = -ENODEV;
926 goto out_iounmap; 925 goto out_release_io;
927 } 926 }
928 927
929 /* set platform driver data */ 928 /* set platform driver data */
@@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev)
931 swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL); 930 swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL);
932 if (!swd) { 931 if (!swd) {
933 ret = -ENOMEM; 932 ret = -ENOMEM;
934 goto out_iounmap; 933 goto out_release_io;
935 } 934 }
936 platform_set_drvdata(dev, swd); 935 platform_set_drvdata(dev, swd);
937 936
@@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev)
945 944
946out_kfree: 945out_kfree:
947 kfree(swd); 946 kfree(swd);
948out_iounmap:
949 iounmap(swim_base);
950out_release_io: 947out_release_io:
951 release_mem_region(res->start, resource_size(res)); 948 release_mem_region(res->start, resource_size(res));
952out: 949out:
@@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev)
974 for (drive = 0; drive < swd->floppy_count; drive++) 971 for (drive = 0; drive < swd->floppy_count; drive++)
975 floppy_eject(&swd->unit[drive]); 972 floppy_eject(&swd->unit[drive]);
976 973
977 iounmap(swd->base);
978
979 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 974 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
980 if (res) 975 if (res)
981 release_mem_region(res->start, resource_size(res)); 976 release_mem_region(res->start, resource_size(res));
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index af51015d056e..469541c1e51e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -148,7 +148,7 @@ struct swim3 {
148#define MOTOR_ON 2 148#define MOTOR_ON 2
149#define RELAX 3 /* also eject in progress */ 149#define RELAX 3 /* also eject in progress */
150#define READ_DATA_0 4 150#define READ_DATA_0 4
151#define TWOMEG_DRIVE 5 151#define ONEMEG_DRIVE 5
152#define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */ 152#define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */
153#define DRIVE_PRESENT 7 153#define DRIVE_PRESENT 7
154#define DISK_IN 8 154#define DISK_IN 8
@@ -156,9 +156,9 @@ struct swim3 {
156#define TRACK_ZERO 10 156#define TRACK_ZERO 10
157#define TACHO 11 157#define TACHO 11
158#define READ_DATA_1 12 158#define READ_DATA_1 12
159#define MFM_MODE 13 159#define GCR_MODE 13
160#define SEEK_COMPLETE 14 160#define SEEK_COMPLETE 14
161#define ONEMEG_MEDIA 15 161#define TWOMEG_MEDIA 15
162 162
163/* Definitions of values used in writing and formatting */ 163/* Definitions of values used in writing and formatting */
164#define DATA_ESCAPE 0x99 164#define DATA_ESCAPE 0x99
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index d1c0b60e9326..6dc177bf4c42 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -33,6 +33,7 @@ config HISILICON_LPC
33 bool "Support for ISA I/O space on HiSilicon Hip06/7" 33 bool "Support for ISA I/O space on HiSilicon Hip06/7"
34 depends on ARM64 && (ARCH_HISI || COMPILE_TEST) 34 depends on ARM64 && (ARCH_HISI || COMPILE_TEST)
35 select INDIRECT_PIO 35 select INDIRECT_PIO
36 select MFD_CORE if ACPI
36 help 37 help
37 Driver to enable I/O access to devices attached to the Low Pin 38 Driver to enable I/O access to devices attached to the Low Pin
38 Count bus on the HiSilicon Hip06/7 SoC. 39 Count bus on the HiSilicon Hip06/7 SoC.
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 8327478effd0..bfc566d3f31a 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2371,7 +2371,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
2371 if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) 2371 if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
2372 return media_changed(cdi, 1); 2372 return media_changed(cdi, 1);
2373 2373
2374 if ((unsigned int)arg >= cdi->capacity) 2374 if (arg >= cdi->capacity)
2375 return -EINVAL; 2375 return -EINVAL;
2376 2376
2377 info = kmalloc(sizeof(*info), GFP_KERNEL); 2377 info = kmalloc(sizeof(*info), GFP_KERNEL);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 3cd3aae24d6d..cd888d4ee605 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -261,6 +261,7 @@
261#include <linux/ptrace.h> 261#include <linux/ptrace.h>
262#include <linux/workqueue.h> 262#include <linux/workqueue.h>
263#include <linux/irq.h> 263#include <linux/irq.h>
264#include <linux/ratelimit.h>
264#include <linux/syscalls.h> 265#include <linux/syscalls.h>
265#include <linux/completion.h> 266#include <linux/completion.h>
266#include <linux/uuid.h> 267#include <linux/uuid.h>
@@ -438,6 +439,16 @@ static void _crng_backtrack_protect(struct crng_state *crng,
438static void process_random_ready_list(void); 439static void process_random_ready_list(void);
439static void _get_random_bytes(void *buf, int nbytes); 440static void _get_random_bytes(void *buf, int nbytes);
440 441
442static struct ratelimit_state unseeded_warning =
443 RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
444static struct ratelimit_state urandom_warning =
445 RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
446
447static int ratelimit_disable __read_mostly;
448
449module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
450MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
451
441/********************************************************************** 452/**********************************************************************
442 * 453 *
443 * OS independent entropy store. Here are the functions which handle 454 * OS independent entropy store. Here are the functions which handle
@@ -789,7 +800,7 @@ static void crng_initialize(struct crng_state *crng)
789} 800}
790 801
791#ifdef CONFIG_NUMA 802#ifdef CONFIG_NUMA
792static void numa_crng_init(void) 803static void do_numa_crng_init(struct work_struct *work)
793{ 804{
794 int i; 805 int i;
795 struct crng_state *crng; 806 struct crng_state *crng;
@@ -810,6 +821,13 @@ static void numa_crng_init(void)
810 kfree(pool); 821 kfree(pool);
811 } 822 }
812} 823}
824
825static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
826
827static void numa_crng_init(void)
828{
829 schedule_work(&numa_crng_init_work);
830}
813#else 831#else
814static void numa_crng_init(void) {} 832static void numa_crng_init(void) {}
815#endif 833#endif
@@ -925,6 +943,18 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
925 process_random_ready_list(); 943 process_random_ready_list();
926 wake_up_interruptible(&crng_init_wait); 944 wake_up_interruptible(&crng_init_wait);
927 pr_notice("random: crng init done\n"); 945 pr_notice("random: crng init done\n");
946 if (unseeded_warning.missed) {
947 pr_notice("random: %d get_random_xx warning(s) missed "
948 "due to ratelimiting\n",
949 unseeded_warning.missed);
950 unseeded_warning.missed = 0;
951 }
952 if (urandom_warning.missed) {
953 pr_notice("random: %d urandom warning(s) missed "
954 "due to ratelimiting\n",
955 urandom_warning.missed);
956 urandom_warning.missed = 0;
957 }
928 } 958 }
929} 959}
930 960
@@ -1565,8 +1595,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
1565#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM 1595#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
1566 print_once = true; 1596 print_once = true;
1567#endif 1597#endif
1568 pr_notice("random: %s called from %pS with crng_init=%d\n", 1598 if (__ratelimit(&unseeded_warning))
1569 func_name, caller, crng_init); 1599 pr_notice("random: %s called from %pS with crng_init=%d\n",
1600 func_name, caller, crng_init);
1570} 1601}
1571 1602
1572/* 1603/*
@@ -1760,6 +1791,10 @@ static int rand_initialize(void)
1760 init_std_data(&blocking_pool); 1791 init_std_data(&blocking_pool);
1761 crng_initialize(&primary_crng); 1792 crng_initialize(&primary_crng);
1762 crng_global_init_time = jiffies; 1793 crng_global_init_time = jiffies;
1794 if (ratelimit_disable) {
1795 urandom_warning.interval = 0;
1796 unseeded_warning.interval = 0;
1797 }
1763 return 0; 1798 return 0;
1764} 1799}
1765early_initcall(rand_initialize); 1800early_initcall(rand_initialize);
@@ -1827,9 +1862,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1827 1862
1828 if (!crng_ready() && maxwarn > 0) { 1863 if (!crng_ready() && maxwarn > 0) {
1829 maxwarn--; 1864 maxwarn--;
1830 printk(KERN_NOTICE "random: %s: uninitialized urandom read " 1865 if (__ratelimit(&urandom_warning))
1831 "(%zd bytes read)\n", 1866 printk(KERN_NOTICE "random: %s: uninitialized "
1832 current->comm, nbytes); 1867 "urandom read (%zd bytes read)\n",
1868 current->comm, nbytes);
1833 spin_lock_irqsave(&primary_crng.lock, flags); 1869 spin_lock_irqsave(&primary_crng.lock, flags);
1834 crng_init_cnt = 0; 1870 crng_init_cnt = 0;
1835 spin_unlock_irqrestore(&primary_crng.lock, flags); 1871 spin_unlock_irqrestore(&primary_crng.lock, flags);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 468f06134012..21085515814f 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void)
422 } 422 }
423} 423}
424 424
425static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, 425static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
426 int pages) 426 int pages)
427{ 427{
428 struct port_buffer *buf; 428 struct port_buffer *buf;
@@ -445,16 +445,16 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
445 return buf; 445 return buf;
446 } 446 }
447 447
448 if (is_rproc_serial(vq->vdev)) { 448 if (is_rproc_serial(vdev)) {
449 /* 449 /*
450 * Allocate DMA memory from ancestor. When a virtio 450 * Allocate DMA memory from ancestor. When a virtio
451 * device is created by remoteproc, the DMA memory is 451 * device is created by remoteproc, the DMA memory is
452 * associated with the grandparent device: 452 * associated with the grandparent device:
453 * vdev => rproc => platform-dev. 453 * vdev => rproc => platform-dev.
454 */ 454 */
455 if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent) 455 if (!vdev->dev.parent || !vdev->dev.parent->parent)
456 goto free_buf; 456 goto free_buf;
457 buf->dev = vq->vdev->dev.parent->parent; 457 buf->dev = vdev->dev.parent->parent;
458 458
459 /* Increase device refcnt to avoid freeing it */ 459 /* Increase device refcnt to avoid freeing it */
460 get_device(buf->dev); 460 get_device(buf->dev);
@@ -838,7 +838,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
838 838
839 count = min((size_t)(32 * 1024), count); 839 count = min((size_t)(32 * 1024), count);
840 840
841 buf = alloc_buf(port->out_vq, count, 0); 841 buf = alloc_buf(port->portdev->vdev, count, 0);
842 if (!buf) 842 if (!buf)
843 return -ENOMEM; 843 return -ENOMEM;
844 844
@@ -957,7 +957,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
957 if (ret < 0) 957 if (ret < 0)
958 goto error_out; 958 goto error_out;
959 959
960 buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); 960 buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
961 if (!buf) { 961 if (!buf) {
962 ret = -ENOMEM; 962 ret = -ENOMEM;
963 goto error_out; 963 goto error_out;
@@ -1374,7 +1374,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
1374 1374
1375 nr_added_bufs = 0; 1375 nr_added_bufs = 0;
1376 do { 1376 do {
1377 buf = alloc_buf(vq, PAGE_SIZE, 0); 1377 buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
1378 if (!buf) 1378 if (!buf)
1379 break; 1379 break;
1380 1380
@@ -1402,7 +1402,6 @@ static int add_port(struct ports_device *portdev, u32 id)
1402{ 1402{
1403 char debugfs_name[16]; 1403 char debugfs_name[16];
1404 struct port *port; 1404 struct port *port;
1405 struct port_buffer *buf;
1406 dev_t devt; 1405 dev_t devt;
1407 unsigned int nr_added_bufs; 1406 unsigned int nr_added_bufs;
1408 int err; 1407 int err;
@@ -1513,8 +1512,6 @@ static int add_port(struct ports_device *portdev, u32 id)
1513 return 0; 1512 return 0;
1514 1513
1515free_inbufs: 1514free_inbufs:
1516 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1517 free_buf(buf, true);
1518free_device: 1515free_device:
1519 device_destroy(pdrvdata.class, port->dev->devt); 1516 device_destroy(pdrvdata.class, port->dev->devt);
1520free_cdev: 1517free_cdev:
@@ -1539,34 +1536,14 @@ static void remove_port(struct kref *kref)
1539 1536
1540static void remove_port_data(struct port *port) 1537static void remove_port_data(struct port *port)
1541{ 1538{
1542 struct port_buffer *buf;
1543
1544 spin_lock_irq(&port->inbuf_lock); 1539 spin_lock_irq(&port->inbuf_lock);
1545 /* Remove unused data this port might have received. */ 1540 /* Remove unused data this port might have received. */
1546 discard_port_data(port); 1541 discard_port_data(port);
1547 spin_unlock_irq(&port->inbuf_lock); 1542 spin_unlock_irq(&port->inbuf_lock);
1548 1543
1549 /* Remove buffers we queued up for the Host to send us data in. */
1550 do {
1551 spin_lock_irq(&port->inbuf_lock);
1552 buf = virtqueue_detach_unused_buf(port->in_vq);
1553 spin_unlock_irq(&port->inbuf_lock);
1554 if (buf)
1555 free_buf(buf, true);
1556 } while (buf);
1557
1558 spin_lock_irq(&port->outvq_lock); 1544 spin_lock_irq(&port->outvq_lock);
1559 reclaim_consumed_buffers(port); 1545 reclaim_consumed_buffers(port);
1560 spin_unlock_irq(&port->outvq_lock); 1546 spin_unlock_irq(&port->outvq_lock);
1561
1562 /* Free pending buffers from the out-queue. */
1563 do {
1564 spin_lock_irq(&port->outvq_lock);
1565 buf = virtqueue_detach_unused_buf(port->out_vq);
1566 spin_unlock_irq(&port->outvq_lock);
1567 if (buf)
1568 free_buf(buf, true);
1569 } while (buf);
1570} 1547}
1571 1548
1572/* 1549/*
@@ -1791,13 +1768,24 @@ static void control_work_handler(struct work_struct *work)
1791 spin_unlock(&portdev->c_ivq_lock); 1768 spin_unlock(&portdev->c_ivq_lock);
1792} 1769}
1793 1770
1771static void flush_bufs(struct virtqueue *vq, bool can_sleep)
1772{
1773 struct port_buffer *buf;
1774 unsigned int len;
1775
1776 while ((buf = virtqueue_get_buf(vq, &len)))
1777 free_buf(buf, can_sleep);
1778}
1779
1794static void out_intr(struct virtqueue *vq) 1780static void out_intr(struct virtqueue *vq)
1795{ 1781{
1796 struct port *port; 1782 struct port *port;
1797 1783
1798 port = find_port_by_vq(vq->vdev->priv, vq); 1784 port = find_port_by_vq(vq->vdev->priv, vq);
1799 if (!port) 1785 if (!port) {
1786 flush_bufs(vq, false);
1800 return; 1787 return;
1788 }
1801 1789
1802 wake_up_interruptible(&port->waitqueue); 1790 wake_up_interruptible(&port->waitqueue);
1803} 1791}
@@ -1808,8 +1796,10 @@ static void in_intr(struct virtqueue *vq)
1808 unsigned long flags; 1796 unsigned long flags;
1809 1797
1810 port = find_port_by_vq(vq->vdev->priv, vq); 1798 port = find_port_by_vq(vq->vdev->priv, vq);
1811 if (!port) 1799 if (!port) {
1800 flush_bufs(vq, false);
1812 return; 1801 return;
1802 }
1813 1803
1814 spin_lock_irqsave(&port->inbuf_lock, flags); 1804 spin_lock_irqsave(&port->inbuf_lock, flags);
1815 port->inbuf = get_inbuf(port); 1805 port->inbuf = get_inbuf(port);
@@ -1984,24 +1974,54 @@ static const struct file_operations portdev_fops = {
1984 1974
1985static void remove_vqs(struct ports_device *portdev) 1975static void remove_vqs(struct ports_device *portdev)
1986{ 1976{
1977 struct virtqueue *vq;
1978
1979 virtio_device_for_each_vq(portdev->vdev, vq) {
1980 struct port_buffer *buf;
1981
1982 flush_bufs(vq, true);
1983 while ((buf = virtqueue_detach_unused_buf(vq)))
1984 free_buf(buf, true);
1985 }
1987 portdev->vdev->config->del_vqs(portdev->vdev); 1986 portdev->vdev->config->del_vqs(portdev->vdev);
1988 kfree(portdev->in_vqs); 1987 kfree(portdev->in_vqs);
1989 kfree(portdev->out_vqs); 1988 kfree(portdev->out_vqs);
1990} 1989}
1991 1990
1992static void remove_controlq_data(struct ports_device *portdev) 1991static void virtcons_remove(struct virtio_device *vdev)
1993{ 1992{
1994 struct port_buffer *buf; 1993 struct ports_device *portdev;
1995 unsigned int len; 1994 struct port *port, *port2;
1996 1995
1997 if (!use_multiport(portdev)) 1996 portdev = vdev->priv;
1998 return;
1999 1997
2000 while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) 1998 spin_lock_irq(&pdrvdata_lock);
2001 free_buf(buf, true); 1999 list_del(&portdev->list);
2000 spin_unlock_irq(&pdrvdata_lock);
2002 2001
2003 while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) 2002 /* Disable interrupts for vqs */
2004 free_buf(buf, true); 2003 vdev->config->reset(vdev);
2004 /* Finish up work that's lined up */
2005 if (use_multiport(portdev))
2006 cancel_work_sync(&portdev->control_work);
2007 else
2008 cancel_work_sync(&portdev->config_work);
2009
2010 list_for_each_entry_safe(port, port2, &portdev->ports, list)
2011 unplug_port(port);
2012
2013 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2014
2015 /*
2016 * When yanking out a device, we immediately lose the
2017 * (device-side) queues. So there's no point in keeping the
2018 * guest side around till we drop our final reference. This
2019 * also means that any ports which are in an open state will
2020 * have to just stop using the port, as the vqs are going
2021 * away.
2022 */
2023 remove_vqs(portdev);
2024 kfree(portdev);
2005} 2025}
2006 2026
2007/* 2027/*
@@ -2070,6 +2090,7 @@ static int virtcons_probe(struct virtio_device *vdev)
2070 2090
2071 spin_lock_init(&portdev->ports_lock); 2091 spin_lock_init(&portdev->ports_lock);
2072 INIT_LIST_HEAD(&portdev->ports); 2092 INIT_LIST_HEAD(&portdev->ports);
2093 INIT_LIST_HEAD(&portdev->list);
2073 2094
2074 virtio_device_ready(portdev->vdev); 2095 virtio_device_ready(portdev->vdev);
2075 2096
@@ -2087,8 +2108,15 @@ static int virtcons_probe(struct virtio_device *vdev)
2087 if (!nr_added_bufs) { 2108 if (!nr_added_bufs) {
2088 dev_err(&vdev->dev, 2109 dev_err(&vdev->dev,
2089 "Error allocating buffers for control queue\n"); 2110 "Error allocating buffers for control queue\n");
2090 err = -ENOMEM; 2111 /*
2091 goto free_vqs; 2112 * The host might want to notify mgmt sw about device
2113 * add failure.
2114 */
2115 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
2116 VIRTIO_CONSOLE_DEVICE_READY, 0);
2117 /* Device was functional: we need full cleanup. */
2118 virtcons_remove(vdev);
2119 return -ENOMEM;
2092 } 2120 }
2093 } else { 2121 } else {
2094 /* 2122 /*
@@ -2119,11 +2147,6 @@ static int virtcons_probe(struct virtio_device *vdev)
2119 2147
2120 return 0; 2148 return 0;
2121 2149
2122free_vqs:
2123 /* The host might want to notify mgmt sw about device add failure */
2124 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
2125 VIRTIO_CONSOLE_DEVICE_READY, 0);
2126 remove_vqs(portdev);
2127free_chrdev: 2150free_chrdev:
2128 unregister_chrdev(portdev->chr_major, "virtio-portsdev"); 2151 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2129free: 2152free:
@@ -2132,43 +2155,6 @@ fail:
2132 return err; 2155 return err;
2133} 2156}
2134 2157
2135static void virtcons_remove(struct virtio_device *vdev)
2136{
2137 struct ports_device *portdev;
2138 struct port *port, *port2;
2139
2140 portdev = vdev->priv;
2141
2142 spin_lock_irq(&pdrvdata_lock);
2143 list_del(&portdev->list);
2144 spin_unlock_irq(&pdrvdata_lock);
2145
2146 /* Disable interrupts for vqs */
2147 vdev->config->reset(vdev);
2148 /* Finish up work that's lined up */
2149 if (use_multiport(portdev))
2150 cancel_work_sync(&portdev->control_work);
2151 else
2152 cancel_work_sync(&portdev->config_work);
2153
2154 list_for_each_entry_safe(port, port2, &portdev->ports, list)
2155 unplug_port(port);
2156
2157 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2158
2159 /*
2160 * When yanking out a device, we immediately lose the
2161 * (device-side) queues. So there's no point in keeping the
2162 * guest side around till we drop our final reference. This
2163 * also means that any ports which are in an open state will
2164 * have to just stop using the port, as the vqs are going
2165 * away.
2166 */
2167 remove_controlq_data(portdev);
2168 remove_vqs(portdev);
2169 kfree(portdev);
2170}
2171
2172static struct virtio_device_id id_table[] = { 2158static struct virtio_device_id id_table[] = {
2173 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, 2159 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
2174 { 0 }, 2160 { 0 },
@@ -2209,7 +2195,6 @@ static int virtcons_freeze(struct virtio_device *vdev)
2209 */ 2195 */
2210 if (use_multiport(portdev)) 2196 if (use_multiport(portdev))
2211 virtqueue_disable_cb(portdev->c_ivq); 2197 virtqueue_disable_cb(portdev->c_ivq);
2212 remove_controlq_data(portdev);
2213 2198
2214 list_for_each_entry(port, &portdev->ports, list) { 2199 list_for_each_entry(port, &portdev->ports, list) {
2215 virtqueue_disable_cb(port->in_vq); 2200 virtqueue_disable_cb(port->in_vq);
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 7f56fe5183f2..de55c7d57438 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -71,16 +71,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ
71 71
72 Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS. 72 Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS.
73 73
74config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
75 bool "Broadcom STB AVS CPUfreq driver sysfs debug capability"
76 depends on ARM_BRCMSTB_AVS_CPUFREQ
77 help
78 Enabling this option turns on debug support via sysfs under
79 /sys/kernel/debug/brcmstb-avs-cpufreq. It is possible to read all and
80 write some AVS mailbox registers through sysfs entries.
81
82 If in doubt, say N.
83
84config ARM_EXYNOS5440_CPUFREQ 74config ARM_EXYNOS5440_CPUFREQ
85 tristate "SAMSUNG EXYNOS5440" 75 tristate "SAMSUNG EXYNOS5440"
86 depends on SOC_EXYNOS5440 76 depends on SOC_EXYNOS5440
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 6cdac1aaf23c..b07559b9ed99 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -49,13 +49,6 @@
49#include <linux/platform_device.h> 49#include <linux/platform_device.h>
50#include <linux/semaphore.h> 50#include <linux/semaphore.h>
51 51
52#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
53#include <linux/ctype.h>
54#include <linux/debugfs.h>
55#include <linux/slab.h>
56#include <linux/uaccess.h>
57#endif
58
59/* Max number of arguments AVS calls take */ 52/* Max number of arguments AVS calls take */
60#define AVS_MAX_CMD_ARGS 4 53#define AVS_MAX_CMD_ARGS 4
61/* 54/*
@@ -182,88 +175,11 @@ struct private_data {
182 void __iomem *base; 175 void __iomem *base;
183 void __iomem *avs_intr_base; 176 void __iomem *avs_intr_base;
184 struct device *dev; 177 struct device *dev;
185#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
186 struct dentry *debugfs;
187#endif
188 struct completion done; 178 struct completion done;
189 struct semaphore sem; 179 struct semaphore sem;
190 struct pmap pmap; 180 struct pmap pmap;
191}; 181};
192 182
193#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
194
195enum debugfs_format {
196 DEBUGFS_NORMAL,
197 DEBUGFS_FLOAT,
198 DEBUGFS_REV,
199};
200
201struct debugfs_data {
202 struct debugfs_entry *entry;
203 struct private_data *priv;
204};
205
206struct debugfs_entry {
207 char *name;
208 u32 offset;
209 fmode_t mode;
210 enum debugfs_format format;
211};
212
213#define DEBUGFS_ENTRY(name, mode, format) { \
214 #name, AVS_MBOX_##name, mode, format \
215}
216
217/*
218 * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly.
219 */
220#define AVS_MBOX_PARAM1 AVS_MBOX_PARAM(0)
221#define AVS_MBOX_PARAM2 AVS_MBOX_PARAM(1)
222#define AVS_MBOX_PARAM3 AVS_MBOX_PARAM(2)
223#define AVS_MBOX_PARAM4 AVS_MBOX_PARAM(3)
224
225/*
226 * This table stores the name, access permissions and offset for each hardware
227 * register and is used to generate debugfs entries.
228 */
229static struct debugfs_entry debugfs_entries[] = {
230 DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL),
231 DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL),
232 DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT),
233 DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT),
234 DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT),
235 DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT),
236 DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL),
237 DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL),
238 DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL),
239 DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL),
240 DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV),
241 DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL),
242 DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL),
243 DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL),
244 DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL),
245 DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL),
246 DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT),
247 DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT),
248 DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT),
249 DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT),
250 DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL),
251};
252
253static int brcm_avs_target_index(struct cpufreq_policy *, unsigned int);
254
255static char *__strtolower(char *s)
256{
257 char *p;
258
259 for (p = s; *p; p++)
260 *p = tolower(*p);
261
262 return s;
263}
264
265#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
266
267static void __iomem *__map_region(const char *name) 183static void __iomem *__map_region(const char *name)
268{ 184{
269 struct device_node *np; 185 struct device_node *np;
@@ -516,238 +432,6 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
516 return table; 432 return table;
517} 433}
518 434
519#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
520
521#define MANT(x) (unsigned int)(abs((x)) / 1000)
522#define FRAC(x) (unsigned int)(abs((x)) - abs((x)) / 1000 * 1000)
523
524static int brcm_avs_debug_show(struct seq_file *s, void *data)
525{
526 struct debugfs_data *dbgfs = s->private;
527 void __iomem *base;
528 u32 val, offset;
529
530 if (!dbgfs) {
531 seq_puts(s, "No device pointer\n");
532 return 0;
533 }
534
535 base = dbgfs->priv->base;
536 offset = dbgfs->entry->offset;
537 val = readl(base + offset);
538 switch (dbgfs->entry->format) {
539 case DEBUGFS_NORMAL:
540 seq_printf(s, "%u\n", val);
541 break;
542 case DEBUGFS_FLOAT:
543 seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val));
544 break;
545 case DEBUGFS_REV:
546 seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff),
547 (val >> 16 & 0xff), (val >> 8 & 0xff),
548 val & 0xff);
549 break;
550 }
551 seq_printf(s, "0x%08x\n", val);
552
553 return 0;
554}
555
556#undef MANT
557#undef FRAC
558
559static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf,
560 size_t size, loff_t *ppos)
561{
562 struct seq_file *s = file->private_data;
563 struct debugfs_data *dbgfs = s->private;
564 struct private_data *priv = dbgfs->priv;
565 void __iomem *base, *avs_intr_base;
566 bool use_issue_command = false;
567 unsigned long val, offset;
568 char str[128];
569 int ret;
570 char *str_ptr = str;
571
572 if (size >= sizeof(str))
573 return -E2BIG;
574
575 memset(str, 0, sizeof(str));
576 ret = copy_from_user(str, buf, size);
577 if (ret)
578 return ret;
579
580 base = priv->base;
581 avs_intr_base = priv->avs_intr_base;
582 offset = dbgfs->entry->offset;
583 /*
584 * Special case writing to "command" entry only: if the string starts
585 * with a 'c', we use the driver's __issue_avs_command() function.
586 * Otherwise, we perform a raw write. This should allow testing of raw
587 * access as well as using the higher level function. (Raw access
588 * doesn't clear the firmware return status after issuing the command.)
589 */
590 if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) {
591 use_issue_command = true;
592 str_ptr++;
593 }
594 if (kstrtoul(str_ptr, 0, &val) != 0)
595 return -EINVAL;
596
597 /*
598 * Setting the P-state is a special case. We need to update the CPU
599 * frequency we report.
600 */
601 if (val == AVS_CMD_SET_PSTATE) {
602 struct cpufreq_policy *policy;
603 unsigned int pstate;
604
605 policy = cpufreq_cpu_get(smp_processor_id());
606 /* Read back the P-state we are about to set */
607 pstate = readl(base + AVS_MBOX_PARAM(0));
608 if (use_issue_command) {
609 ret = brcm_avs_target_index(policy, pstate);
610 return ret ? ret : size;
611 }
612 policy->cur = policy->freq_table[pstate].frequency;
613 }
614
615 if (use_issue_command) {
616 ret = __issue_avs_command(priv, val, false, NULL);
617 } else {
618 /* Locking here is not perfect, but is only for debug. */
619 ret = down_interruptible(&priv->sem);
620 if (ret)
621 return ret;
622
623 writel(val, base + offset);
624 /* We have to wake up the firmware to process a command. */
625 if (offset == AVS_MBOX_COMMAND)
626 writel(AVS_CPU_L2_INT_MASK,
627 avs_intr_base + AVS_CPU_L2_SET0);
628 up(&priv->sem);
629 }
630
631 return ret ? ret : size;
632}
633
634static struct debugfs_entry *__find_debugfs_entry(const char *name)
635{
636 int i;
637
638 for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++)
639 if (strcasecmp(debugfs_entries[i].name, name) == 0)
640 return &debugfs_entries[i];
641
642 return NULL;
643}
644
645static int brcm_avs_debug_open(struct inode *inode, struct file *file)
646{
647 struct debugfs_data *data;
648 fmode_t fmode;
649 int ret;
650
651 /*
652 * seq_open(), which is called by single_open(), clears "write" access.
653 * We need write access to some files, so we preserve our access mode
654 * and restore it.
655 */
656 fmode = file->f_mode;
657 /*
658 * Check access permissions even for root. We don't want to be writing
659 * to read-only registers. Access for regular users has already been
660 * checked by the VFS layer.
661 */
662 if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR))
663 return -EACCES;
664
665 data = kmalloc(sizeof(*data), GFP_KERNEL);
666 if (!data)
667 return -ENOMEM;
668 /*
669 * We use the same file system operations for all our debug files. To
670 * produce specific output, we look up the file name upon opening a
671 * debugfs entry and map it to a memory offset. This offset is then used
672 * in the generic "show" function to read a specific register.
673 */
674 data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname);
675 data->priv = inode->i_private;
676
677 ret = single_open(file, brcm_avs_debug_show, data);
678 if (ret)
679 kfree(data);
680 file->f_mode = fmode;
681
682 return ret;
683}
684
685static int brcm_avs_debug_release(struct inode *inode, struct file *file)
686{
687 struct seq_file *seq_priv = file->private_data;
688 struct debugfs_data *data = seq_priv->private;
689
690 kfree(data);
691 return single_release(inode, file);
692}
693
694static const struct file_operations brcm_avs_debug_ops = {
695 .open = brcm_avs_debug_open,
696 .read = seq_read,
697 .write = brcm_avs_seq_write,
698 .llseek = seq_lseek,
699 .release = brcm_avs_debug_release,
700};
701
702static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev)
703{
704 struct private_data *priv = platform_get_drvdata(pdev);
705 struct dentry *dir;
706 int i;
707
708 if (!priv)
709 return;
710
711 dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL);
712 if (IS_ERR_OR_NULL(dir))
713 return;
714 priv->debugfs = dir;
715
716 for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) {
717 /*
718 * The DEBUGFS_ENTRY macro generates uppercase strings. We
719 * convert them to lowercase before creating the debugfs
720 * entries.
721 */
722 char *entry = __strtolower(debugfs_entries[i].name);
723 fmode_t mode = debugfs_entries[i].mode;
724
725 if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode,
726 dir, priv, &brcm_avs_debug_ops)) {
727 priv->debugfs = NULL;
728 debugfs_remove_recursive(dir);
729 break;
730 }
731 }
732}
733
734static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev)
735{
736 struct private_data *priv = platform_get_drvdata(pdev);
737
738 if (priv && priv->debugfs) {
739 debugfs_remove_recursive(priv->debugfs);
740 priv->debugfs = NULL;
741 }
742}
743
744#else
745
746static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {}
747static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {}
748
749#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
750
751/* 435/*
752 * To ensure the right firmware is running we need to 436 * To ensure the right firmware is running we need to
753 * - check the MAGIC matches what we expect 437 * - check the MAGIC matches what we expect
@@ -1016,11 +700,8 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
1016 return ret; 700 return ret;
1017 701
1018 brcm_avs_driver.driver_data = pdev; 702 brcm_avs_driver.driver_data = pdev;
1019 ret = cpufreq_register_driver(&brcm_avs_driver);
1020 if (!ret)
1021 brcm_avs_cpufreq_debug_init(pdev);
1022 703
1023 return ret; 704 return cpufreq_register_driver(&brcm_avs_driver);
1024} 705}
1025 706
1026static int brcm_avs_cpufreq_remove(struct platform_device *pdev) 707static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
@@ -1032,8 +713,6 @@ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
1032 if (ret) 713 if (ret)
1033 return ret; 714 return ret;
1034 715
1035 brcm_avs_cpufreq_debug_exit(pdev);
1036
1037 priv = platform_get_drvdata(pdev); 716 priv = platform_get_drvdata(pdev);
1038 iounmap(priv->base); 717 iounmap(priv->base);
1039 iounmap(priv->avs_intr_base); 718 iounmap(priv->avs_intr_base);
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index e6f17825db79..2b90606452a2 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -284,7 +284,7 @@ scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
284 struct clock_info *ci = handle->clk_priv; 284 struct clock_info *ci = handle->clk_priv;
285 struct scmi_clock_info *clk = ci->clk + clk_id; 285 struct scmi_clock_info *clk = ci->clk + clk_id;
286 286
287 if (!clk->name || !clk->name[0]) 287 if (!clk->name[0])
288 return NULL; 288 return NULL;
289 289
290 return clk; 290 return clk;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b0e591eaa71a..e14263fca1c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1459,10 +1459,11 @@ static const u32 sgpr_init_compute_shader[] =
1459static const u32 vgpr_init_regs[] = 1459static const u32 vgpr_init_regs[] =
1460{ 1460{
1461 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff, 1461 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
1462 mmCOMPUTE_RESOURCE_LIMITS, 0, 1462 mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1463 mmCOMPUTE_NUM_THREAD_X, 256*4, 1463 mmCOMPUTE_NUM_THREAD_X, 256*4,
1464 mmCOMPUTE_NUM_THREAD_Y, 1, 1464 mmCOMPUTE_NUM_THREAD_Y, 1,
1465 mmCOMPUTE_NUM_THREAD_Z, 1, 1465 mmCOMPUTE_NUM_THREAD_Z, 1,
1466 mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
1466 mmCOMPUTE_PGM_RSRC2, 20, 1467 mmCOMPUTE_PGM_RSRC2, 20,
1467 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1468 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1468 mmCOMPUTE_USER_DATA_1, 0xedcedc01, 1469 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1479,10 +1480,11 @@ static const u32 vgpr_init_regs[] =
1479static const u32 sgpr1_init_regs[] = 1480static const u32 sgpr1_init_regs[] =
1480{ 1481{
1481 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f, 1482 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
1482 mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, 1483 mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1483 mmCOMPUTE_NUM_THREAD_X, 256*5, 1484 mmCOMPUTE_NUM_THREAD_X, 256*5,
1484 mmCOMPUTE_NUM_THREAD_Y, 1, 1485 mmCOMPUTE_NUM_THREAD_Y, 1,
1485 mmCOMPUTE_NUM_THREAD_Z, 1, 1486 mmCOMPUTE_NUM_THREAD_Z, 1,
1487 mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1486 mmCOMPUTE_PGM_RSRC2, 20, 1488 mmCOMPUTE_PGM_RSRC2, 20,
1487 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1489 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1488 mmCOMPUTE_USER_DATA_1, 0xedcedc01, 1490 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1503,6 +1505,7 @@ static const u32 sgpr2_init_regs[] =
1503 mmCOMPUTE_NUM_THREAD_X, 256*5, 1505 mmCOMPUTE_NUM_THREAD_X, 256*5,
1504 mmCOMPUTE_NUM_THREAD_Y, 1, 1506 mmCOMPUTE_NUM_THREAD_Y, 1,
1505 mmCOMPUTE_NUM_THREAD_Z, 1, 1507 mmCOMPUTE_NUM_THREAD_Z, 1,
1508 mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1506 mmCOMPUTE_PGM_RSRC2, 20, 1509 mmCOMPUTE_PGM_RSRC2, 20,
1507 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1510 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1508 mmCOMPUTE_USER_DATA_1, 0xedcedc01, 1511 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index ed2f06c9f346..3858820a0055 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -6,5 +6,6 @@ config HSA_AMD
6 tristate "HSA kernel driver for AMD GPU devices" 6 tristate "HSA kernel driver for AMD GPU devices"
7 depends on DRM_AMDGPU && X86_64 7 depends on DRM_AMDGPU && X86_64
8 imply AMD_IOMMU_V2 8 imply AMD_IOMMU_V2
9 select MMU_NOTIFIER
9 help 10 help
10 Enable this if you want to use HSA features on AMD GPU devices. 11 Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index cd679cf1fd30..59808a39ecf4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -749,12 +749,13 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
749 struct timespec64 time; 749 struct timespec64 time;
750 750
751 dev = kfd_device_by_id(args->gpu_id); 751 dev = kfd_device_by_id(args->gpu_id);
752 if (dev == NULL) 752 if (dev)
753 return -EINVAL; 753 /* Reading GPU clock counter from KGD */
754 754 args->gpu_clock_counter =
755 /* Reading GPU clock counter from KGD */ 755 dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
756 args->gpu_clock_counter = 756 else
757 dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); 757 /* Node without GPU resource */
758 args->gpu_clock_counter = 0;
758 759
759 /* No access to rdtsc. Using raw monotonic time */ 760 /* No access to rdtsc. Using raw monotonic time */
760 getrawmonotonic64(&time); 761 getrawmonotonic64(&time);
@@ -1147,7 +1148,7 @@ err_unlock:
1147 return ret; 1148 return ret;
1148} 1149}
1149 1150
1150bool kfd_dev_is_large_bar(struct kfd_dev *dev) 1151static bool kfd_dev_is_large_bar(struct kfd_dev *dev)
1151{ 1152{
1152 struct kfd_local_mem_info mem_info; 1153 struct kfd_local_mem_info mem_info;
1153 1154
@@ -1421,7 +1422,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
1421 1422
1422 pdd = kfd_get_process_device_data(dev, p); 1423 pdd = kfd_get_process_device_data(dev, p);
1423 if (!pdd) { 1424 if (!pdd) {
1424 err = PTR_ERR(pdd); 1425 err = -EINVAL;
1425 goto bind_process_to_device_failed; 1426 goto bind_process_to_device_failed;
1426 } 1427 }
1427 1428
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4e2f379ce217..1dd1142246c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4557,6 +4557,7 @@ static int dm_update_crtcs_state(struct dc *dc,
4557 struct amdgpu_dm_connector *aconnector = NULL; 4557 struct amdgpu_dm_connector *aconnector = NULL;
4558 struct drm_connector_state *new_con_state = NULL; 4558 struct drm_connector_state *new_con_state = NULL;
4559 struct dm_connector_state *dm_conn_state = NULL; 4559 struct dm_connector_state *dm_conn_state = NULL;
4560 struct drm_plane_state *new_plane_state = NULL;
4560 4561
4561 new_stream = NULL; 4562 new_stream = NULL;
4562 4563
@@ -4564,6 +4565,13 @@ static int dm_update_crtcs_state(struct dc *dc,
4564 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 4565 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4565 acrtc = to_amdgpu_crtc(crtc); 4566 acrtc = to_amdgpu_crtc(crtc);
4566 4567
4568 new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
4569
4570 if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
4571 ret = -EINVAL;
4572 goto fail;
4573 }
4574
4567 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 4575 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
4568 4576
4569 /* TODO This hack should go away */ 4577 /* TODO This hack should go away */
@@ -4760,7 +4768,7 @@ static int dm_update_planes_state(struct dc *dc,
4760 if (!dm_old_crtc_state->stream) 4768 if (!dm_old_crtc_state->stream)
4761 continue; 4769 continue;
4762 4770
4763 DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n", 4771 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
4764 plane->base.id, old_plane_crtc->base.id); 4772 plane->base.id, old_plane_crtc->base.id);
4765 4773
4766 if (!dc_remove_plane_from_context( 4774 if (!dc_remove_plane_from_context(
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 490017df371d..4be21bf54749 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -329,14 +329,15 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
329{ 329{
330 int src; 330 int src;
331 struct irq_list_head *lh; 331 struct irq_list_head *lh;
332 unsigned long irq_table_flags;
332 DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); 333 DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
333
334 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { 334 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
335 335 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
336 /* The handler was removed from the table, 336 /* The handler was removed from the table,
337 * it means it is safe to flush all the 'work' 337 * it means it is safe to flush all the 'work'
338 * (because no code can schedule a new one). */ 338 * (because no code can schedule a new one). */
339 lh = &adev->dm.irq_handler_list_low_tab[src]; 339 lh = &adev->dm.irq_handler_list_low_tab[src];
340 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
340 flush_work(&lh->work); 341 flush_work(&lh->work);
341 } 342 }
342} 343}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 8291d74f26bc..ace9ad578ca0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -161,6 +161,11 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
161 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 161 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
162 struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; 162 struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
163 163
164 if (amdgpu_dm_connector->edid) {
165 kfree(amdgpu_dm_connector->edid);
166 amdgpu_dm_connector->edid = NULL;
167 }
168
164 drm_encoder_cleanup(&amdgpu_encoder->base); 169 drm_encoder_cleanup(&amdgpu_encoder->base);
165 kfree(amdgpu_encoder); 170 kfree(amdgpu_encoder);
166 drm_connector_cleanup(connector); 171 drm_connector_cleanup(connector);
@@ -181,28 +186,22 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
181void dm_dp_mst_dc_sink_create(struct drm_connector *connector) 186void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
182{ 187{
183 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 188 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
184 struct edid *edid;
185 struct dc_sink *dc_sink; 189 struct dc_sink *dc_sink;
186 struct dc_sink_init_data init_params = { 190 struct dc_sink_init_data init_params = {
187 .link = aconnector->dc_link, 191 .link = aconnector->dc_link,
188 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; 192 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
189 193
194 /* FIXME none of this is safe. we shouldn't touch aconnector here in
195 * atomic_check
196 */
197
190 /* 198 /*
191 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists 199 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
192 */ 200 */
193 if (!aconnector->port || !aconnector->port->aux.ddc.algo) 201 if (!aconnector->port || !aconnector->port->aux.ddc.algo)
194 return; 202 return;
195 203
196 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); 204 ASSERT(aconnector->edid);
197
198 if (!edid) {
199 drm_mode_connector_update_edid_property(
200 &aconnector->base,
201 NULL);
202 return;
203 }
204
205 aconnector->edid = edid;
206 205
207 dc_sink = dc_link_add_remote_sink( 206 dc_sink = dc_link_add_remote_sink(
208 aconnector->dc_link, 207 aconnector->dc_link,
@@ -215,9 +214,6 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
215 214
216 amdgpu_dm_add_sink_to_freesync_module( 215 amdgpu_dm_add_sink_to_freesync_module(
217 connector, aconnector->edid); 216 connector, aconnector->edid);
218
219 drm_mode_connector_update_edid_property(
220 &aconnector->base, aconnector->edid);
221} 217}
222 218
223static int dm_dp_mst_get_modes(struct drm_connector *connector) 219static int dm_dp_mst_get_modes(struct drm_connector *connector)
@@ -230,10 +226,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
230 226
231 if (!aconnector->edid) { 227 if (!aconnector->edid) {
232 struct edid *edid; 228 struct edid *edid;
233 struct dc_sink *dc_sink;
234 struct dc_sink_init_data init_params = {
235 .link = aconnector->dc_link,
236 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
237 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); 229 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
238 230
239 if (!edid) { 231 if (!edid) {
@@ -244,11 +236,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
244 } 236 }
245 237
246 aconnector->edid = edid; 238 aconnector->edid = edid;
239 }
247 240
241 if (!aconnector->dc_sink) {
242 struct dc_sink *dc_sink;
243 struct dc_sink_init_data init_params = {
244 .link = aconnector->dc_link,
245 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
248 dc_sink = dc_link_add_remote_sink( 246 dc_sink = dc_link_add_remote_sink(
249 aconnector->dc_link, 247 aconnector->dc_link,
250 (uint8_t *)edid, 248 (uint8_t *)aconnector->edid,
251 (edid->extensions + 1) * EDID_LENGTH, 249 (aconnector->edid->extensions + 1) * EDID_LENGTH,
252 &init_params); 250 &init_params);
253 251
254 dc_sink->priv = aconnector; 252 dc_sink->priv = aconnector;
@@ -256,12 +254,12 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
256 254
257 if (aconnector->dc_sink) 255 if (aconnector->dc_sink)
258 amdgpu_dm_add_sink_to_freesync_module( 256 amdgpu_dm_add_sink_to_freesync_module(
259 connector, edid); 257 connector, aconnector->edid);
260
261 drm_mode_connector_update_edid_property(
262 &aconnector->base, edid);
263 } 258 }
264 259
260 drm_mode_connector_update_edid_property(
261 &aconnector->base, aconnector->edid);
262
265 ret = drm_add_edid_modes(connector, aconnector->edid); 263 ret = drm_add_edid_modes(connector, aconnector->edid);
266 264
267 return ret; 265 return ret;
@@ -424,14 +422,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
424 dc_sink_release(aconnector->dc_sink); 422 dc_sink_release(aconnector->dc_sink);
425 aconnector->dc_sink = NULL; 423 aconnector->dc_sink = NULL;
426 } 424 }
427 if (aconnector->edid) {
428 kfree(aconnector->edid);
429 aconnector->edid = NULL;
430 }
431
432 drm_mode_connector_update_edid_property(
433 &aconnector->base,
434 NULL);
435 425
436 aconnector->mst_connected = false; 426 aconnector->mst_connected = false;
437} 427}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 134069f36482..39f1db4acda4 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -4451,6 +4451,7 @@ drm_reset_display_info(struct drm_connector *connector)
4451 info->max_tmds_clock = 0; 4451 info->max_tmds_clock = 0;
4452 info->dvi_dual = false; 4452 info->dvi_dual = false;
4453 info->has_hdmi_infoframe = false; 4453 info->has_hdmi_infoframe = false;
4454 memset(&info->hdmi, 0, sizeof(info->hdmi));
4454 4455
4455 info->non_desktop = 0; 4456 info->non_desktop = 0;
4456} 4457}
@@ -4462,17 +4463,11 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
4462 4463
4463 u32 quirks = edid_get_quirks(edid); 4464 u32 quirks = edid_get_quirks(edid);
4464 4465
4466 drm_reset_display_info(connector);
4467
4465 info->width_mm = edid->width_cm * 10; 4468 info->width_mm = edid->width_cm * 10;
4466 info->height_mm = edid->height_cm * 10; 4469 info->height_mm = edid->height_cm * 10;
4467 4470
4468 /* driver figures it out in this case */
4469 info->bpc = 0;
4470 info->color_formats = 0;
4471 info->cea_rev = 0;
4472 info->max_tmds_clock = 0;
4473 info->dvi_dual = false;
4474 info->has_hdmi_infoframe = false;
4475
4476 info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP); 4471 info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
4477 4472
4478 DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop); 4473 DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index fc8b2c6e3508..32d24c69da3c 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2140,10 +2140,22 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
2140 } 2140 }
2141 } 2141 }
2142 2142
2143 /* According to BSpec, "The CD clock frequency must be at least twice 2143 /*
2144 * According to BSpec, "The CD clock frequency must be at least twice
2144 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default. 2145 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
2146 *
2147 * FIXME: Check the actual, not default, BCLK being used.
2148 *
2149 * FIXME: This does not depend on ->has_audio because the higher CDCLK
2150 * is required for audio probe, also when there are no audio capable
2151 * displays connected at probe time. This leads to unnecessarily high
2152 * CDCLK when audio is not required.
2153 *
2154 * FIXME: This limit is only applied when there are displays connected
2155 * at probe time. If we probe without displays, we'll still end up using
2156 * the platform minimum CDCLK, failing audio probe.
2145 */ 2157 */
2146 if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) 2158 if (INTEL_GEN(dev_priv) >= 9)
2147 min_cdclk = max(2 * 96000, min_cdclk); 2159 min_cdclk = max(2 * 96000, min_cdclk);
2148 2160
2149 /* 2161 /*
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d4368589b355..a80fbad9be0f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -49,12 +49,12 @@
49 * check the condition before the timeout. 49 * check the condition before the timeout.
50 */ 50 */
51#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \ 51#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
52 unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \ 52 const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
53 long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \ 53 long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
54 int ret__; \ 54 int ret__; \
55 might_sleep(); \ 55 might_sleep(); \
56 for (;;) { \ 56 for (;;) { \
57 bool expired__ = time_after(jiffies, timeout__); \ 57 const bool expired__ = ktime_after(ktime_get_raw(), end__); \
58 OP; \ 58 OP; \
59 if (COND) { \ 59 if (COND) { \
60 ret__ = 0; \ 60 ret__ = 0; \
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6f12adc06365..6467a5cc2ca3 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -806,7 +806,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
806 return; 806 return;
807 807
808 intel_fbdev_sync(ifbdev); 808 intel_fbdev_sync(ifbdev);
809 if (ifbdev->vma) 809 if (ifbdev->vma || ifbdev->helper.deferred_setup)
810 drm_fb_helper_hotplug_event(&ifbdev->helper); 810 drm_fb_helper_hotplug_event(&ifbdev->helper);
811} 811}
812 812
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 53ea564f971e..66de4b2dc8b7 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -641,19 +641,18 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
641 641
642 DRM_DEBUG_KMS("Enabling DC6\n"); 642 DRM_DEBUG_KMS("Enabling DC6\n");
643 643
644 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 644 /* Wa Display #1183: skl,kbl,cfl */
645 if (IS_GEN9_BC(dev_priv))
646 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
647 SKL_SELECT_ALTERNATE_DC_EXIT);
645 648
649 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
646} 650}
647 651
648void skl_disable_dc6(struct drm_i915_private *dev_priv) 652void skl_disable_dc6(struct drm_i915_private *dev_priv)
649{ 653{
650 DRM_DEBUG_KMS("Disabling DC6\n"); 654 DRM_DEBUG_KMS("Disabling DC6\n");
651 655
652 /* Wa Display #1183: skl,kbl,cfl */
653 if (IS_GEN9_BC(dev_priv))
654 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
655 SKL_SELECT_ALTERNATE_DC_EXIT);
656
657 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 656 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
658} 657}
659 658
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 6e5e1aa54ce1..b001699297c4 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -351,6 +351,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
351 351
352 spin_lock_irqsave(&dev->event_lock, flags); 352 spin_lock_irqsave(&dev->event_lock, flags);
353 mdp4_crtc->event = crtc->state->event; 353 mdp4_crtc->event = crtc->state->event;
354 crtc->state->event = NULL;
354 spin_unlock_irqrestore(&dev->event_lock, flags); 355 spin_unlock_irqrestore(&dev->event_lock, flags);
355 356
356 blend_setup(crtc); 357 blend_setup(crtc);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 9893e43ba6c5..76b96081916f 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -708,6 +708,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
708 708
709 spin_lock_irqsave(&dev->event_lock, flags); 709 spin_lock_irqsave(&dev->event_lock, flags);
710 mdp5_crtc->event = crtc->state->event; 710 mdp5_crtc->event = crtc->state->event;
711 crtc->state->event = NULL;
711 spin_unlock_irqrestore(&dev->event_lock, flags); 712 spin_unlock_irqrestore(&dev->event_lock, flags);
712 713
713 /* 714 /*
diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c
index b4a8aa4490ee..005760bee708 100644
--- a/drivers/gpu/drm/msm/disp/mdp_format.c
+++ b/drivers/gpu/drm/msm/disp/mdp_format.c
@@ -171,7 +171,8 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
171 return i; 171 return i;
172} 172}
173 173
174const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format) 174const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format,
175 uint64_t modifier)
175{ 176{
176 int i; 177 int i;
177 for (i = 0; i < ARRAY_SIZE(formats); i++) { 178 for (i = 0; i < ARRAY_SIZE(formats); i++) {
diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h
index 1185487e7e5e..4fa8dbe4e165 100644
--- a/drivers/gpu/drm/msm/disp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.h
@@ -98,7 +98,7 @@ struct mdp_format {
98#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv) 98#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
99 99
100uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); 100uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
101const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); 101const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
102 102
103/* MDP capabilities */ 103/* MDP capabilities */
104#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */ 104#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 7a03a9489708..8baba30d6c65 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -173,6 +173,7 @@ struct msm_dsi_host {
173 173
174 bool registered; 174 bool registered;
175 bool power_on; 175 bool power_on;
176 bool enabled;
176 int irq; 177 int irq;
177}; 178};
178 179
@@ -775,7 +776,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
775 switch (mipi_fmt) { 776 switch (mipi_fmt) {
776 case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888; 777 case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
777 case MIPI_DSI_FMT_RGB666_PACKED: 778 case MIPI_DSI_FMT_RGB666_PACKED:
778 case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666; 779 case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
779 case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565; 780 case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
780 default: return CMD_DST_FORMAT_RGB888; 781 default: return CMD_DST_FORMAT_RGB888;
781 } 782 }
@@ -986,13 +987,19 @@ static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
986 987
987static void dsi_wait4video_done(struct msm_dsi_host *msm_host) 988static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
988{ 989{
990 u32 ret = 0;
991 struct device *dev = &msm_host->pdev->dev;
992
989 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1); 993 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
990 994
991 reinit_completion(&msm_host->video_comp); 995 reinit_completion(&msm_host->video_comp);
992 996
993 wait_for_completion_timeout(&msm_host->video_comp, 997 ret = wait_for_completion_timeout(&msm_host->video_comp,
994 msecs_to_jiffies(70)); 998 msecs_to_jiffies(70));
995 999
1000 if (ret <= 0)
1001 dev_err(dev, "wait for video done timed out\n");
1002
996 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0); 1003 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
997} 1004}
998 1005
@@ -1001,7 +1008,7 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
1001 if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO)) 1008 if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
1002 return; 1009 return;
1003 1010
1004 if (msm_host->power_on) { 1011 if (msm_host->power_on && msm_host->enabled) {
1005 dsi_wait4video_done(msm_host); 1012 dsi_wait4video_done(msm_host);
1006 /* delay 4 ms to skip BLLP */ 1013 /* delay 4 ms to skip BLLP */
1007 usleep_range(2000, 4000); 1014 usleep_range(2000, 4000);
@@ -2203,7 +2210,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host)
2203 * pm_runtime_put_autosuspend(&msm_host->pdev->dev); 2210 * pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2204 * } 2211 * }
2205 */ 2212 */
2206 2213 msm_host->enabled = true;
2207 return 0; 2214 return 0;
2208} 2215}
2209 2216
@@ -2211,6 +2218,7 @@ int msm_dsi_host_disable(struct mipi_dsi_host *host)
2211{ 2218{
2212 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2219 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2213 2220
2221 msm_host->enabled = false;
2214 dsi_op_mode_config(msm_host, 2222 dsi_op_mode_config(msm_host,
2215 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false); 2223 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
2216 2224
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 8e9d5c255820..9a9fa0c75a13 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -265,6 +265,115 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
265 return 0; 265 return 0;
266} 266}
267 267
268int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
269 struct msm_dsi_phy_clk_request *clk_req)
270{
271 const unsigned long bit_rate = clk_req->bitclk_rate;
272 const unsigned long esc_rate = clk_req->escclk_rate;
273 s32 ui, ui_x8, lpx;
274 s32 tmax, tmin;
275 s32 pcnt0 = 50;
276 s32 pcnt1 = 50;
277 s32 pcnt2 = 10;
278 s32 pcnt3 = 30;
279 s32 pcnt4 = 10;
280 s32 pcnt5 = 2;
281 s32 coeff = 1000; /* Precision, should avoid overflow */
282 s32 hb_en, hb_en_ckln;
283 s32 temp;
284
285 if (!bit_rate || !esc_rate)
286 return -EINVAL;
287
288 timing->hs_halfbyte_en = 0;
289 hb_en = 0;
290 timing->hs_halfbyte_en_ckln = 0;
291 hb_en_ckln = 0;
292
293 ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
294 ui_x8 = ui << 3;
295 lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
296
297 temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
298 tmin = max_t(s32, temp, 0);
299 temp = (95 * coeff) / ui_x8;
300 tmax = max_t(s32, temp, 0);
301 timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
302
303 temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
304 tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
305 tmax = (tmin > 255) ? 511 : 255;
306 timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
307
308 tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
309 temp = 105 * coeff + 12 * ui - 20 * coeff;
310 tmax = (temp + 3 * ui) / ui_x8;
311 timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
312
313 temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
314 tmin = max_t(s32, temp, 0);
315 temp = (85 * coeff + 6 * ui) / ui_x8;
316 tmax = max_t(s32, temp, 0);
317 timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
318
319 temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
320 tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
321 tmax = 255;
322 timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
323
324 tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
325 temp = 105 * coeff + 12 * ui - 20 * coeff;
326 tmax = (temp / ui_x8) - 1;
327 timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
328
329 temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
330 timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
331
332 tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
333 tmax = 255;
334 timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
335
336 temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
337 timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
338
339 temp = 60 * coeff + 52 * ui - 43 * ui;
340 tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
341 tmax = 63;
342 timing->shared_timings.clk_post =
343 linear_inter(tmax, tmin, pcnt2, 0, false);
344
345 temp = 8 * ui + (timing->clk_prepare << 3) * ui;
346 temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
347 temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
348 (((timing->hs_rqst_ckln << 3) + 8) * ui);
349 tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
350 tmax = 63;
351 if (tmin > tmax) {
352 temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
353 timing->shared_timings.clk_pre = temp >> 1;
354 timing->shared_timings.clk_pre_inc_by_2 = 1;
355 } else {
356 timing->shared_timings.clk_pre =
357 linear_inter(tmax, tmin, pcnt2, 0, false);
358 timing->shared_timings.clk_pre_inc_by_2 = 0;
359 }
360
361 timing->ta_go = 3;
362 timing->ta_sure = 0;
363 timing->ta_get = 4;
364
365 DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
366 timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
367 timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
368 timing->clk_trail, timing->clk_prepare, timing->hs_exit,
369 timing->hs_zero, timing->hs_prepare, timing->hs_trail,
370 timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
371 timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
372 timing->hs_prep_dly_ckln);
373
374 return 0;
375}
376
268void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, 377void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
269 u32 bit_mask) 378 u32 bit_mask)
270{ 379{
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index c56268cbdb3d..a24ab80994a3 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -101,6 +101,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
101 struct msm_dsi_phy_clk_request *clk_req); 101 struct msm_dsi_phy_clk_request *clk_req);
102int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, 102int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
103 struct msm_dsi_phy_clk_request *clk_req); 103 struct msm_dsi_phy_clk_request *clk_req);
104int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
105 struct msm_dsi_phy_clk_request *clk_req);
104void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, 106void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
105 u32 bit_mask); 107 u32 bit_mask);
106int msm_dsi_phy_init_common(struct msm_dsi_phy *phy); 108int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 0af951aaeea1..b3fffc8dbb2a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -79,34 +79,6 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
79 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04); 79 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
80} 80}
81 81
82static int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
83 struct msm_dsi_phy_clk_request *clk_req)
84{
85 /*
86 * TODO: These params need to be computed, they're currently hardcoded
87 * for a 1440x2560@60Hz panel with a byteclk of 100.618 Mhz, and a
88 * default escape clock of 19.2 Mhz.
89 */
90
91 timing->hs_halfbyte_en = 0;
92 timing->clk_zero = 0x1c;
93 timing->clk_prepare = 0x07;
94 timing->clk_trail = 0x07;
95 timing->hs_exit = 0x23;
96 timing->hs_zero = 0x21;
97 timing->hs_prepare = 0x07;
98 timing->hs_trail = 0x07;
99 timing->hs_rqst = 0x05;
100 timing->ta_sure = 0x00;
101 timing->ta_go = 0x03;
102 timing->ta_get = 0x04;
103
104 timing->shared_timings.clk_pre = 0x2d;
105 timing->shared_timings.clk_post = 0x0d;
106
107 return 0;
108}
109
110static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, 82static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
111 struct msm_dsi_phy_clk_request *clk_req) 83 struct msm_dsi_phy_clk_request *clk_req)
112{ 84{
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 0e0c87252ab0..7a16242bf8bf 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -183,7 +183,8 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
183 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); 183 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
184 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); 184 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
185 185
186 format = kms->funcs->get_format(kms, mode_cmd->pixel_format); 186 format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
187 mode_cmd->modifier[0]);
187 if (!format) { 188 if (!format) {
188 dev_err(dev->dev, "unsupported pixel format: %4.4s\n", 189 dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
189 (char *)&mode_cmd->pixel_format); 190 (char *)&mode_cmd->pixel_format);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c178563fcd4d..456622b46335 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
92 92
93 if (IS_ERR(fb)) { 93 if (IS_ERR(fb)) {
94 dev_err(dev->dev, "failed to allocate fb\n"); 94 dev_err(dev->dev, "failed to allocate fb\n");
95 ret = PTR_ERR(fb); 95 return PTR_ERR(fb);
96 goto fail;
97 } 96 }
98 97
99 bo = msm_framebuffer_bo(fb, 0); 98 bo = msm_framebuffer_bo(fb, 0);
@@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
151 150
152fail_unlock: 151fail_unlock:
153 mutex_unlock(&dev->struct_mutex); 152 mutex_unlock(&dev->struct_mutex);
154fail: 153 drm_framebuffer_remove(fb);
155
156 if (ret) {
157 if (fb)
158 drm_framebuffer_remove(fb);
159 }
160
161 return ret; 154 return ret;
162} 155}
163 156
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 95196479f651..f583bb4222f9 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -132,17 +132,19 @@ static void put_pages(struct drm_gem_object *obj)
132 struct msm_gem_object *msm_obj = to_msm_bo(obj); 132 struct msm_gem_object *msm_obj = to_msm_bo(obj);
133 133
134 if (msm_obj->pages) { 134 if (msm_obj->pages) {
135 /* For non-cached buffers, ensure the new pages are clean 135 if (msm_obj->sgt) {
136 * because display controller, GPU, etc. are not coherent: 136 /* For non-cached buffers, ensure the new
137 */ 137 * pages are clean because display controller,
138 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 138 * GPU, etc. are not coherent:
139 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, 139 */
140 msm_obj->sgt->nents, DMA_BIDIRECTIONAL); 140 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
141 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
142 msm_obj->sgt->nents,
143 DMA_BIDIRECTIONAL);
141 144
142 if (msm_obj->sgt)
143 sg_free_table(msm_obj->sgt); 145 sg_free_table(msm_obj->sgt);
144 146 kfree(msm_obj->sgt);
145 kfree(msm_obj->sgt); 147 }
146 148
147 if (use_pages(obj)) 149 if (use_pages(obj))
148 drm_gem_put_pages(obj, msm_obj->pages, true, false); 150 drm_gem_put_pages(obj, msm_obj->pages, true, false);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 17d5824417ad..aaa329dc020e 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -48,8 +48,11 @@ struct msm_kms_funcs {
48 /* functions to wait for atomic commit completed on each CRTC */ 48 /* functions to wait for atomic commit completed on each CRTC */
49 void (*wait_for_crtc_commit_done)(struct msm_kms *kms, 49 void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
50 struct drm_crtc *crtc); 50 struct drm_crtc *crtc);
51 /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
52 const struct msm_format *(*get_format)(struct msm_kms *kms,
53 const uint32_t format,
54 const uint64_t modifiers);
51 /* misc: */ 55 /* misc: */
52 const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
53 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, 56 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
54 struct drm_encoder *encoder); 57 struct drm_encoder *encoder);
55 int (*set_split_display)(struct msm_kms *kms, 58 int (*set_split_display)(struct msm_kms *kms,
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index c0fb52c6d4ca..01665b98c57e 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -179,10 +179,9 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea
179 uint32_t type, bool interruptible) 179 uint32_t type, bool interruptible)
180{ 180{
181 struct qxl_command cmd; 181 struct qxl_command cmd;
182 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
183 182
184 cmd.type = type; 183 cmd.type = type;
185 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); 184 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
186 185
187 return qxl_ring_push(qdev->command_ring, &cmd, interruptible); 186 return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
188} 187}
@@ -192,10 +191,9 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
192 uint32_t type, bool interruptible) 191 uint32_t type, bool interruptible)
193{ 192{
194 struct qxl_command cmd; 193 struct qxl_command cmd;
195 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
196 194
197 cmd.type = type; 195 cmd.type = type;
198 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); 196 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
199 197
200 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); 198 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
201} 199}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 00a1a66b052a..864b456080c4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -167,6 +167,7 @@ struct qxl_release {
167 167
168 int id; 168 int id;
169 int type; 169 int type;
170 struct qxl_bo *release_bo;
170 uint32_t release_offset; 171 uint32_t release_offset;
171 uint32_t surface_release_id; 172 uint32_t surface_release_id;
172 struct ww_acquire_ctx ticket; 173 struct ww_acquire_ctx ticket;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index e238a1a2eca1..6cc9f3367fa0 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -182,9 +182,9 @@ static int qxl_process_single_command(struct qxl_device *qdev,
182 goto out_free_reloc; 182 goto out_free_reloc;
183 183
184 /* TODO copy slow path code from i915 */ 184 /* TODO copy slow path code from i915 */
185 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); 185 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
186 unwritten = __copy_from_user_inatomic_nocache 186 unwritten = __copy_from_user_inatomic_nocache
187 (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), 187 (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
188 u64_to_user_ptr(cmd->command), cmd->command_size); 188 u64_to_user_ptr(cmd->command), cmd->command_size);
189 189
190 { 190 {
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 5d84a66fed36..7cb214577275 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -173,6 +173,7 @@ qxl_release_free_list(struct qxl_release *release)
173 list_del(&entry->tv.head); 173 list_del(&entry->tv.head);
174 kfree(entry); 174 kfree(entry);
175 } 175 }
176 release->release_bo = NULL;
176} 177}
177 178
178void 179void
@@ -296,7 +297,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
296{ 297{
297 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { 298 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
298 int idr_ret; 299 int idr_ret;
299 struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
300 struct qxl_bo *bo; 300 struct qxl_bo *bo;
301 union qxl_release_info *info; 301 union qxl_release_info *info;
302 302
@@ -304,8 +304,9 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
304 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); 304 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
305 if (idr_ret < 0) 305 if (idr_ret < 0)
306 return idr_ret; 306 return idr_ret;
307 bo = to_qxl_bo(entry->tv.bo); 307 bo = create_rel->release_bo;
308 308
309 (*release)->release_bo = bo;
309 (*release)->release_offset = create_rel->release_offset + 64; 310 (*release)->release_offset = create_rel->release_offset + 64;
310 311
311 qxl_release_list_add(*release, bo); 312 qxl_release_list_add(*release, bo);
@@ -365,6 +366,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
365 366
366 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); 367 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
367 368
369 (*release)->release_bo = bo;
368 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx]; 370 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
369 qdev->current_release_bo_offset[cur_idx]++; 371 qdev->current_release_bo_offset[cur_idx]++;
370 372
@@ -408,13 +410,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
408{ 410{
409 void *ptr; 411 void *ptr;
410 union qxl_release_info *info; 412 union qxl_release_info *info;
411 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); 413 struct qxl_bo *bo = release->release_bo;
412 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
413 414
414 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); 415 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
415 if (!ptr) 416 if (!ptr)
416 return NULL; 417 return NULL;
417 info = ptr + (release->release_offset & ~PAGE_SIZE); 418 info = ptr + (release->release_offset & ~PAGE_MASK);
418 return info; 419 return info;
419} 420}
420 421
@@ -422,11 +423,10 @@ void qxl_release_unmap(struct qxl_device *qdev,
422 struct qxl_release *release, 423 struct qxl_release *release,
423 union qxl_release_info *info) 424 union qxl_release_info *info)
424{ 425{
425 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); 426 struct qxl_bo *bo = release->release_bo;
426 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
427 void *ptr; 427 void *ptr;
428 428
429 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); 429 ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
430 qxl_bo_kunmap_atomic_page(qdev, bo, ptr); 430 qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
431} 431}
432 432
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index bffff4c9fbf5..be3f14d7746d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -94,64 +94,9 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
94 } 94 }
95} 95}
96 96
97static enum drm_mode_status sun4i_lvds_encoder_mode_valid(struct drm_encoder *crtc,
98 const struct drm_display_mode *mode)
99{
100 struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(crtc);
101 struct sun4i_tcon *tcon = lvds->tcon;
102 u32 hsync = mode->hsync_end - mode->hsync_start;
103 u32 vsync = mode->vsync_end - mode->vsync_start;
104 unsigned long rate = mode->clock * 1000;
105 long rounded_rate;
106
107 DRM_DEBUG_DRIVER("Validating modes...\n");
108
109 if (hsync < 1)
110 return MODE_HSYNC_NARROW;
111
112 if (hsync > 0x3ff)
113 return MODE_HSYNC_WIDE;
114
115 if ((mode->hdisplay < 1) || (mode->htotal < 1))
116 return MODE_H_ILLEGAL;
117
118 if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff))
119 return MODE_BAD_HVALUE;
120
121 DRM_DEBUG_DRIVER("Horizontal parameters OK\n");
122
123 if (vsync < 1)
124 return MODE_VSYNC_NARROW;
125
126 if (vsync > 0x3ff)
127 return MODE_VSYNC_WIDE;
128
129 if ((mode->vdisplay < 1) || (mode->vtotal < 1))
130 return MODE_V_ILLEGAL;
131
132 if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff))
133 return MODE_BAD_VVALUE;
134
135 DRM_DEBUG_DRIVER("Vertical parameters OK\n");
136
137 tcon->dclk_min_div = 7;
138 tcon->dclk_max_div = 7;
139 rounded_rate = clk_round_rate(tcon->dclk, rate);
140 if (rounded_rate < rate)
141 return MODE_CLOCK_LOW;
142
143 if (rounded_rate > rate)
144 return MODE_CLOCK_HIGH;
145
146 DRM_DEBUG_DRIVER("Clock rate OK\n");
147
148 return MODE_OK;
149}
150
151static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = { 97static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
152 .disable = sun4i_lvds_encoder_disable, 98 .disable = sun4i_lvds_encoder_disable,
153 .enable = sun4i_lvds_encoder_enable, 99 .enable = sun4i_lvds_encoder_enable,
154 .mode_valid = sun4i_lvds_encoder_mode_valid,
155}; 100};
156 101
157static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = { 102static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 48e4f1df6e5d..020070d483d3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -293,7 +293,7 @@ retry:
293 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); 293 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
294 if (ret == -ENOSPC) { 294 if (ret == -ENOSPC) {
295 spin_unlock(&vgdev->ctrlq.qlock); 295 spin_unlock(&vgdev->ctrlq.qlock);
296 wait_event(vgdev->ctrlq.ack_queue, vq->num_free); 296 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
297 spin_lock(&vgdev->ctrlq.qlock); 297 spin_lock(&vgdev->ctrlq.qlock);
298 goto retry; 298 goto retry;
299 } else { 299 } else {
@@ -368,7 +368,7 @@ retry:
368 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC); 368 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
369 if (ret == -ENOSPC) { 369 if (ret == -ENOSPC) {
370 spin_unlock(&vgdev->cursorq.qlock); 370 spin_unlock(&vgdev->cursorq.qlock);
371 wait_event(vgdev->cursorq.ack_queue, vq->num_free); 371 wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
372 spin_lock(&vgdev->cursorq.qlock); 372 spin_lock(&vgdev->cursorq.qlock);
373 goto retry; 373 goto retry;
374 } else { 374 } else {
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 051a72eecb24..d2cc55e21374 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -40,6 +40,10 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
40#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 40#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
41#endif 41#endif
42 42
43#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB
44#define PCI_DEVICE_ID_AMD_17H_RR_NB 0x15d0
45#endif
46
43/* CPUID function 0x80000001, ebx */ 47/* CPUID function 0x80000001, ebx */
44#define CPUID_PKGTYPE_MASK 0xf0000000 48#define CPUID_PKGTYPE_MASK 0xf0000000
45#define CPUID_PKGTYPE_F 0x00000000 49#define CPUID_PKGTYPE_F 0x00000000
@@ -72,6 +76,7 @@ struct k10temp_data {
72 struct pci_dev *pdev; 76 struct pci_dev *pdev;
73 void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); 77 void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
74 int temp_offset; 78 int temp_offset;
79 u32 temp_adjust_mask;
75}; 80};
76 81
77struct tctl_offset { 82struct tctl_offset {
@@ -84,6 +89,7 @@ static const struct tctl_offset tctl_offset_table[] = {
84 { 0x17, "AMD Ryzen 5 1600X", 20000 }, 89 { 0x17, "AMD Ryzen 5 1600X", 20000 },
85 { 0x17, "AMD Ryzen 7 1700X", 20000 }, 90 { 0x17, "AMD Ryzen 7 1700X", 20000 },
86 { 0x17, "AMD Ryzen 7 1800X", 20000 }, 91 { 0x17, "AMD Ryzen 7 1800X", 20000 },
92 { 0x17, "AMD Ryzen 7 2700X", 10000 },
87 { 0x17, "AMD Ryzen Threadripper 1950X", 27000 }, 93 { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
88 { 0x17, "AMD Ryzen Threadripper 1920X", 27000 }, 94 { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
89 { 0x17, "AMD Ryzen Threadripper 1900X", 27000 }, 95 { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
@@ -129,6 +135,8 @@ static ssize_t temp1_input_show(struct device *dev,
129 135
130 data->read_tempreg(data->pdev, &regval); 136 data->read_tempreg(data->pdev, &regval);
131 temp = (regval >> 21) * 125; 137 temp = (regval >> 21) * 125;
138 if (regval & data->temp_adjust_mask)
139 temp -= 49000;
132 if (temp > data->temp_offset) 140 if (temp > data->temp_offset)
133 temp -= data->temp_offset; 141 temp -= data->temp_offset;
134 else 142 else
@@ -259,12 +267,14 @@ static int k10temp_probe(struct pci_dev *pdev,
259 data->pdev = pdev; 267 data->pdev = pdev;
260 268
261 if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 || 269 if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
262 boot_cpu_data.x86_model == 0x70)) 270 boot_cpu_data.x86_model == 0x70)) {
263 data->read_tempreg = read_tempreg_nb_f15; 271 data->read_tempreg = read_tempreg_nb_f15;
264 else if (boot_cpu_data.x86 == 0x17) 272 } else if (boot_cpu_data.x86 == 0x17) {
273 data->temp_adjust_mask = 0x80000;
265 data->read_tempreg = read_tempreg_nb_f17; 274 data->read_tempreg = read_tempreg_nb_f17;
266 else 275 } else {
267 data->read_tempreg = read_tempreg_pci; 276 data->read_tempreg = read_tempreg_pci;
277 }
268 278
269 for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) { 279 for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
270 const struct tctl_offset *entry = &tctl_offset_table[i]; 280 const struct tctl_offset *entry = &tctl_offset_table[i];
@@ -292,6 +302,7 @@ static const struct pci_device_id k10temp_id_table[] = {
292 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, 302 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
293 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, 303 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
294 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, 304 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
305 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) },
295 {} 306 {}
296}; 307};
297MODULE_DEVICE_TABLE(pci, k10temp_id_table); 308MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 8b0bc4fc06e8..b0bc77bf2cd9 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -1380,8 +1380,8 @@ static int __init nct6683_find(int sioaddr, struct nct6683_sio_data *sio_data)
1380 /* Activate logical device if needed */ 1380 /* Activate logical device if needed */
1381 val = superio_inb(sioaddr, SIO_REG_ENABLE); 1381 val = superio_inb(sioaddr, SIO_REG_ENABLE);
1382 if (!(val & 0x01)) { 1382 if (!(val & 0x01)) {
1383 pr_err("EC is disabled\n"); 1383 pr_warn("Forcibly enabling EC access. Data may be unusable.\n");
1384 goto fail; 1384 superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
1385 } 1385 }
1386 1386
1387 superio_exit(sioaddr); 1387 superio_exit(sioaddr);
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 363bf56eb0f2..91976b6ca300 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -170,7 +170,10 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
170 scmi_chip_info.info = ptr_scmi_ci; 170 scmi_chip_info.info = ptr_scmi_ci;
171 chip_info = &scmi_chip_info; 171 chip_info = &scmi_chip_info;
172 172
173 for (type = 0; type < hwmon_max && nr_count[type]; type++) { 173 for (type = 0; type < hwmon_max; type++) {
174 if (!nr_count[type])
175 continue;
176
174 scmi_hwmon_add_chan_info(scmi_hwmon_chan, dev, nr_count[type], 177 scmi_hwmon_add_chan_info(scmi_hwmon_chan, dev, nr_count[type],
175 type, hwmon_attributes[type]); 178 type, hwmon_attributes[type]);
176 *ptr_scmi_ci++ = scmi_hwmon_chan++; 179 *ptr_scmi_ci++ = scmi_hwmon_chan++;
diff --git a/drivers/memory/emif-asm-offsets.c b/drivers/memory/emif-asm-offsets.c
index 71a89d5d3efd..db8043019ec6 100644
--- a/drivers/memory/emif-asm-offsets.c
+++ b/drivers/memory/emif-asm-offsets.c
@@ -16,77 +16,7 @@
16 16
17int main(void) 17int main(void)
18{ 18{
19 DEFINE(EMIF_SDCFG_VAL_OFFSET, 19 ti_emif_asm_offsets();
20 offsetof(struct emif_regs_amx3, emif_sdcfg_val));
21 DEFINE(EMIF_TIMING1_VAL_OFFSET,
22 offsetof(struct emif_regs_amx3, emif_timing1_val));
23 DEFINE(EMIF_TIMING2_VAL_OFFSET,
24 offsetof(struct emif_regs_amx3, emif_timing2_val));
25 DEFINE(EMIF_TIMING3_VAL_OFFSET,
26 offsetof(struct emif_regs_amx3, emif_timing3_val));
27 DEFINE(EMIF_REF_CTRL_VAL_OFFSET,
28 offsetof(struct emif_regs_amx3, emif_ref_ctrl_val));
29 DEFINE(EMIF_ZQCFG_VAL_OFFSET,
30 offsetof(struct emif_regs_amx3, emif_zqcfg_val));
31 DEFINE(EMIF_PMCR_VAL_OFFSET,
32 offsetof(struct emif_regs_amx3, emif_pmcr_val));
33 DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET,
34 offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val));
35 DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET,
36 offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl));
37 DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET,
38 offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh));
39 DEFINE(EMIF_COS_CONFIG_OFFSET,
40 offsetof(struct emif_regs_amx3, emif_cos_config));
41 DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET,
42 offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping));
43 DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET,
44 offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map));
45 DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET,
46 offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map));
47 DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET,
48 offsetof(struct emif_regs_amx3, emif_ocp_config_val));
49 DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET,
50 offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim));
51 DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET,
52 offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw));
53 DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET,
54 offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val));
55 DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET,
56 offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw));
57 DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET,
58 offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1));
59 DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET,
60 offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals));
61 DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3));
62
63 BLANK();
64
65 DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET,
66 offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt));
67 DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET,
68 offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys));
69 DEFINE(EMIF_PM_CONFIG_OFFSET,
70 offsetof(struct ti_emif_pm_data, ti_emif_sram_config));
71 DEFINE(EMIF_PM_REGS_VIRT_OFFSET,
72 offsetof(struct ti_emif_pm_data, regs_virt));
73 DEFINE(EMIF_PM_REGS_PHYS_OFFSET,
74 offsetof(struct ti_emif_pm_data, regs_phys));
75 DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data));
76
77 BLANK();
78
79 DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET,
80 offsetof(struct ti_emif_pm_functions, save_context));
81 DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET,
82 offsetof(struct ti_emif_pm_functions, restore_context));
83 DEFINE(EMIF_PM_ENTER_SR_OFFSET,
84 offsetof(struct ti_emif_pm_functions, enter_sr));
85 DEFINE(EMIF_PM_EXIT_SR_OFFSET,
86 offsetof(struct ti_emif_pm_functions, exit_sr));
87 DEFINE(EMIF_PM_ABORT_SR_OFFSET,
88 offsetof(struct ti_emif_pm_functions, abort_sr));
89 DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions));
90 20
91 return 0; 21 return 0;
92} 22}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 231f3a1e27bf..86503f60468f 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
1994 .cmd_per_lun = 7, 1994 .cmd_per_lun = 7,
1995 .use_clustering = ENABLE_CLUSTERING, 1995 .use_clustering = ENABLE_CLUSTERING,
1996 .shost_attrs = mptscsih_host_attrs, 1996 .shost_attrs = mptscsih_host_attrs,
1997 .no_write_same = 1,
1997}; 1998};
1998 1999
1999static int mptsas_get_linkerrors(struct sas_phy *phy) 2000static int mptsas_get_linkerrors(struct sas_phy *phy)
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index d4c07b85f18e..f5695be14499 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -45,6 +45,7 @@
45#define I82802AB 0x00ad 45#define I82802AB 0x00ad
46#define I82802AC 0x00ac 46#define I82802AC 0x00ac
47#define PF38F4476 0x881c 47#define PF38F4476 0x881c
48#define M28F00AP30 0x8963
48/* STMicroelectronics chips */ 49/* STMicroelectronics chips */
49#define M50LPW080 0x002F 50#define M50LPW080 0x002F
50#define M50FLW080A 0x0080 51#define M50FLW080A 0x0080
@@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
375 extp->MinorVersion = '1'; 376 extp->MinorVersion = '1';
376} 377}
377 378
379static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
380{
381 /*
382 * Micron(was Numonyx) 1Gbit bottom boot are buggy w.r.t
383 * Erase Supend for their small Erase Blocks(0x8000)
384 */
385 if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
386 return 1;
387 return 0;
388}
389
378static inline struct cfi_pri_intelext * 390static inline struct cfi_pri_intelext *
379read_pri_intelext(struct map_info *map, __u16 adr) 391read_pri_intelext(struct map_info *map, __u16 adr)
380{ 392{
@@ -831,21 +843,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
831 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1)))) 843 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
832 goto sleep; 844 goto sleep;
833 845
846 /* Do not allow suspend iff read/write to EB address */
847 if ((adr & chip->in_progress_block_mask) ==
848 chip->in_progress_block_addr)
849 goto sleep;
850
851 /* do not suspend small EBs, buggy Micron Chips */
852 if (cfi_is_micron_28F00AP30(cfi, chip) &&
853 (chip->in_progress_block_mask == ~(0x8000-1)))
854 goto sleep;
834 855
835 /* Erase suspend */ 856 /* Erase suspend */
836 map_write(map, CMD(0xB0), adr); 857 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
837 858
838 /* If the flash has finished erasing, then 'erase suspend' 859 /* If the flash has finished erasing, then 'erase suspend'
839 * appears to make some (28F320) flash devices switch to 860 * appears to make some (28F320) flash devices switch to
840 * 'read' mode. Make sure that we switch to 'read status' 861 * 'read' mode. Make sure that we switch to 'read status'
841 * mode so we get the right data. --rmk 862 * mode so we get the right data. --rmk
842 */ 863 */
843 map_write(map, CMD(0x70), adr); 864 map_write(map, CMD(0x70), chip->in_progress_block_addr);
844 chip->oldstate = FL_ERASING; 865 chip->oldstate = FL_ERASING;
845 chip->state = FL_ERASE_SUSPENDING; 866 chip->state = FL_ERASE_SUSPENDING;
846 chip->erase_suspended = 1; 867 chip->erase_suspended = 1;
847 for (;;) { 868 for (;;) {
848 status = map_read(map, adr); 869 status = map_read(map, chip->in_progress_block_addr);
849 if (map_word_andequal(map, status, status_OK, status_OK)) 870 if (map_word_andequal(map, status, status_OK, status_OK))
850 break; 871 break;
851 872
@@ -1041,8 +1062,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
1041 sending the 0x70 (Read Status) command to an erasing 1062 sending the 0x70 (Read Status) command to an erasing
1042 chip and expecting it to be ignored, that's what we 1063 chip and expecting it to be ignored, that's what we
1043 do. */ 1064 do. */
1044 map_write(map, CMD(0xd0), adr); 1065 map_write(map, CMD(0xd0), chip->in_progress_block_addr);
1045 map_write(map, CMD(0x70), adr); 1066 map_write(map, CMD(0x70), chip->in_progress_block_addr);
1046 chip->oldstate = FL_READY; 1067 chip->oldstate = FL_READY;
1047 chip->state = FL_ERASING; 1068 chip->state = FL_ERASING;
1048 break; 1069 break;
@@ -1933,6 +1954,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1933 map_write(map, CMD(0xD0), adr); 1954 map_write(map, CMD(0xD0), adr);
1934 chip->state = FL_ERASING; 1955 chip->state = FL_ERASING;
1935 chip->erase_suspended = 0; 1956 chip->erase_suspended = 0;
1957 chip->in_progress_block_addr = adr;
1958 chip->in_progress_block_mask = ~(len - 1);
1936 1959
1937 ret = INVAL_CACHE_AND_WAIT(map, chip, adr, 1960 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1938 adr, len, 1961 adr, len,
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 668e2cbc155b..692902df2598 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -816,9 +816,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
816 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) 816 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
817 goto sleep; 817 goto sleep;
818 818
819 /* We could check to see if we're trying to access the sector 819 /* Do not allow suspend iff read/write to EB address */
820 * that is currently being erased. However, no user will try 820 if ((adr & chip->in_progress_block_mask) ==
821 * anything like that so we just wait for the timeout. */ 821 chip->in_progress_block_addr)
822 goto sleep;
822 823
823 /* Erase suspend */ 824 /* Erase suspend */
824 /* It's harmless to issue the Erase-Suspend and Erase-Resume 825 /* It's harmless to issue the Erase-Suspend and Erase-Resume
@@ -2267,6 +2268,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2267 chip->state = FL_ERASING; 2268 chip->state = FL_ERASING;
2268 chip->erase_suspended = 0; 2269 chip->erase_suspended = 0;
2269 chip->in_progress_block_addr = adr; 2270 chip->in_progress_block_addr = adr;
2271 chip->in_progress_block_mask = ~(map->size - 1);
2270 2272
2271 INVALIDATE_CACHE_UDELAY(map, chip, 2273 INVALIDATE_CACHE_UDELAY(map, chip,
2272 adr, map->size, 2274 adr, map->size,
@@ -2356,6 +2358,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
2356 chip->state = FL_ERASING; 2358 chip->state = FL_ERASING;
2357 chip->erase_suspended = 0; 2359 chip->erase_suspended = 0;
2358 chip->in_progress_block_addr = adr; 2360 chip->in_progress_block_addr = adr;
2361 chip->in_progress_block_mask = ~(len - 1);
2359 2362
2360 INVALIDATE_CACHE_UDELAY(map, chip, 2363 INVALIDATE_CACHE_UDELAY(map, chip,
2361 adr, len, 2364 adr, len,
diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c
index d0cd6f8635d7..9c9f8936b63b 100644
--- a/drivers/mtd/nand/core.c
+++ b/drivers/mtd/nand/core.c
@@ -162,7 +162,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
162 ret = nanddev_erase(nand, &pos); 162 ret = nanddev_erase(nand, &pos);
163 if (ret) { 163 if (ret) {
164 einfo->fail_addr = nanddev_pos_to_offs(nand, &pos); 164 einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
165 einfo->state = MTD_ERASE_FAILED;
166 165
167 return ret; 166 return ret;
168 } 167 }
@@ -170,8 +169,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
170 nanddev_pos_next_eraseblock(nand, &pos); 169 nanddev_pos_next_eraseblock(nand, &pos);
171 } 170 }
172 171
173 einfo->state = MTD_ERASE_DONE;
174
175 return 0; 172 return 0;
176} 173}
177EXPORT_SYMBOL_GPL(nanddev_mtd_erase); 174EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 10e953218948..1d779a35ac8e 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2299,29 +2299,20 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
2299 /* 2299 /*
2300 * The legacy "num-cs" property indicates the number of CS on the only 2300 * The legacy "num-cs" property indicates the number of CS on the only
2301 * chip connected to the controller (legacy bindings does not support 2301 * chip connected to the controller (legacy bindings does not support
2302 * more than one chip). CS are only incremented one by one while the RB 2302 * more than one chip). The CS and RB pins are always the #0.
2303 * pin is always the #0.
2304 * 2303 *
2305 * When not using legacy bindings, a couple of "reg" and "nand-rb" 2304 * When not using legacy bindings, a couple of "reg" and "nand-rb"
2306 * properties must be filled. For each chip, expressed as a subnode, 2305 * properties must be filled. For each chip, expressed as a subnode,
2307 * "reg" points to the CS lines and "nand-rb" to the RB line. 2306 * "reg" points to the CS lines and "nand-rb" to the RB line.
2308 */ 2307 */
2309 if (pdata) { 2308 if (pdata || nfc->caps->legacy_of_bindings) {
2310 nsels = 1; 2309 nsels = 1;
2311 } else if (nfc->caps->legacy_of_bindings && 2310 } else {
2312 !of_get_property(np, "num-cs", &nsels)) { 2311 nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
2313 dev_err(dev, "missing num-cs property\n"); 2312 if (nsels <= 0) {
2314 return -EINVAL; 2313 dev_err(dev, "missing/invalid reg property\n");
2315 } else if (!of_get_property(np, "reg", &nsels)) { 2314 return -EINVAL;
2316 dev_err(dev, "missing reg property\n"); 2315 }
2317 return -EINVAL;
2318 }
2319
2320 if (!pdata)
2321 nsels /= sizeof(u32);
2322 if (!nsels) {
2323 dev_err(dev, "invalid reg property size\n");
2324 return -EINVAL;
2325 } 2316 }
2326 2317
2327 /* Alloc the nand chip structure */ 2318 /* Alloc the nand chip structure */
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
index f54518ffb36a..f2052fae21c7 100644
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -645,7 +645,7 @@ static int tango_nand_probe(struct platform_device *pdev)
645 645
646 writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE); 646 writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
647 647
648 clk = clk_get(&pdev->dev, NULL); 648 clk = devm_clk_get(&pdev->dev, NULL);
649 if (IS_ERR(clk)) 649 if (IS_ERR(clk))
650 return PTR_ERR(clk); 650 return PTR_ERR(clk);
651 651
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 4b8e9183489a..5872f31eaa60 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -501,7 +501,9 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
501 void __iomem *reg_base = cqspi->iobase; 501 void __iomem *reg_base = cqspi->iobase;
502 void __iomem *ahb_base = cqspi->ahb_base; 502 void __iomem *ahb_base = cqspi->ahb_base;
503 unsigned int remaining = n_rx; 503 unsigned int remaining = n_rx;
504 unsigned int mod_bytes = n_rx % 4;
504 unsigned int bytes_to_read = 0; 505 unsigned int bytes_to_read = 0;
506 u8 *rxbuf_end = rxbuf + n_rx;
505 int ret = 0; 507 int ret = 0;
506 508
507 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); 509 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
@@ -530,11 +532,24 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
530 } 532 }
531 533
532 while (bytes_to_read != 0) { 534 while (bytes_to_read != 0) {
535 unsigned int word_remain = round_down(remaining, 4);
536
533 bytes_to_read *= cqspi->fifo_width; 537 bytes_to_read *= cqspi->fifo_width;
534 bytes_to_read = bytes_to_read > remaining ? 538 bytes_to_read = bytes_to_read > remaining ?
535 remaining : bytes_to_read; 539 remaining : bytes_to_read;
536 ioread32_rep(ahb_base, rxbuf, 540 bytes_to_read = round_down(bytes_to_read, 4);
537 DIV_ROUND_UP(bytes_to_read, 4)); 541 /* Read 4 byte word chunks then single bytes */
542 if (bytes_to_read) {
543 ioread32_rep(ahb_base, rxbuf,
544 (bytes_to_read / 4));
545 } else if (!word_remain && mod_bytes) {
546 unsigned int temp = ioread32(ahb_base);
547
548 bytes_to_read = mod_bytes;
549 memcpy(rxbuf, &temp, min((unsigned int)
550 (rxbuf_end - rxbuf),
551 bytes_to_read));
552 }
538 rxbuf += bytes_to_read; 553 rxbuf += bytes_to_read;
539 remaining -= bytes_to_read; 554 remaining -= bytes_to_read;
540 bytes_to_read = cqspi_get_rd_sram_level(cqspi); 555 bytes_to_read = cqspi_get_rd_sram_level(cqspi);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b7b113018853..718e4914e3a0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1660,8 +1660,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1660 } /* switch(bond_mode) */ 1660 } /* switch(bond_mode) */
1661 1661
1662#ifdef CONFIG_NET_POLL_CONTROLLER 1662#ifdef CONFIG_NET_POLL_CONTROLLER
1663 slave_dev->npinfo = bond->dev->npinfo; 1663 if (bond->dev->npinfo) {
1664 if (slave_dev->npinfo) {
1665 if (slave_enable_netpoll(new_slave)) { 1664 if (slave_enable_netpoll(new_slave)) {
1666 netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n"); 1665 netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
1667 res = -EBUSY; 1666 res = -EBUSY;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 7ea72ef11a55..d272dc6984ac 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1321,6 +1321,10 @@
1321#define MDIO_VEND2_AN_STAT 0x8002 1321#define MDIO_VEND2_AN_STAT 0x8002
1322#endif 1322#endif
1323 1323
1324#ifndef MDIO_VEND2_PMA_CDR_CONTROL
1325#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
1326#endif
1327
1324#ifndef MDIO_CTRL1_SPEED1G 1328#ifndef MDIO_CTRL1_SPEED1G
1325#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) 1329#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
1326#endif 1330#endif
@@ -1369,6 +1373,10 @@
1369#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08 1373#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08
1370#define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100 1374#define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100
1371 1375
1376#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01
1377#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
1378#define XGBE_PMA_CDR_TRACK_EN_ON 0x01
1379
1372/* Bit setting and getting macros 1380/* Bit setting and getting macros
1373 * The get macro will extract the current bit field value from within 1381 * The get macro will extract the current bit field value from within
1374 * the variable 1382 * the variable
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 7d128be61310..b91143947ed2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
519 "debugfs_create_file failed\n"); 519 "debugfs_create_file failed\n");
520 } 520 }
521 521
522 if (pdata->vdata->an_cdr_workaround) {
523 pfile = debugfs_create_bool("an_cdr_workaround", 0600,
524 pdata->xgbe_debugfs,
525 &pdata->debugfs_an_cdr_workaround);
526 if (!pfile)
527 netdev_err(pdata->netdev,
528 "debugfs_create_bool failed\n");
529
530 pfile = debugfs_create_bool("an_cdr_track_early", 0600,
531 pdata->xgbe_debugfs,
532 &pdata->debugfs_an_cdr_track_early);
533 if (!pfile)
534 netdev_err(pdata->netdev,
535 "debugfs_create_bool failed\n");
536 }
537
522 kfree(buf); 538 kfree(buf);
523} 539}
524 540
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 795e556d4a3f..441d0973957b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
349 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); 349 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
350 350
351 /* Call MDIO/PHY initialization routine */ 351 /* Call MDIO/PHY initialization routine */
352 pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
352 ret = pdata->phy_if.phy_init(pdata); 353 ret = pdata->phy_if.phy_init(pdata);
353 if (ret) 354 if (ret)
354 return ret; 355 return ret;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 072b9f664597..1b45cd73a258 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -432,11 +432,16 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata)
432 xgbe_an73_set(pdata, false, false); 432 xgbe_an73_set(pdata, false, false);
433 xgbe_an73_disable_interrupts(pdata); 433 xgbe_an73_disable_interrupts(pdata);
434 434
435 pdata->an_start = 0;
436
435 netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n"); 437 netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n");
436} 438}
437 439
438static void xgbe_an_restart(struct xgbe_prv_data *pdata) 440static void xgbe_an_restart(struct xgbe_prv_data *pdata)
439{ 441{
442 if (pdata->phy_if.phy_impl.an_pre)
443 pdata->phy_if.phy_impl.an_pre(pdata);
444
440 switch (pdata->an_mode) { 445 switch (pdata->an_mode) {
441 case XGBE_AN_MODE_CL73: 446 case XGBE_AN_MODE_CL73:
442 case XGBE_AN_MODE_CL73_REDRV: 447 case XGBE_AN_MODE_CL73_REDRV:
@@ -453,6 +458,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata)
453 458
454static void xgbe_an_disable(struct xgbe_prv_data *pdata) 459static void xgbe_an_disable(struct xgbe_prv_data *pdata)
455{ 460{
461 if (pdata->phy_if.phy_impl.an_post)
462 pdata->phy_if.phy_impl.an_post(pdata);
463
456 switch (pdata->an_mode) { 464 switch (pdata->an_mode) {
457 case XGBE_AN_MODE_CL73: 465 case XGBE_AN_MODE_CL73:
458 case XGBE_AN_MODE_CL73_REDRV: 466 case XGBE_AN_MODE_CL73_REDRV:
@@ -505,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
505 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, 513 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
506 reg); 514 reg);
507 515
508 if (pdata->phy_if.phy_impl.kr_training_post)
509 pdata->phy_if.phy_impl.kr_training_post(pdata);
510
511 netif_dbg(pdata, link, pdata->netdev, 516 netif_dbg(pdata, link, pdata->netdev,
512 "KR training initiated\n"); 517 "KR training initiated\n");
518
519 if (pdata->phy_if.phy_impl.kr_training_post)
520 pdata->phy_if.phy_impl.kr_training_post(pdata);
513 } 521 }
514 522
515 return XGBE_AN_PAGE_RECEIVED; 523 return XGBE_AN_PAGE_RECEIVED;
@@ -637,11 +645,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
637 return XGBE_AN_NO_LINK; 645 return XGBE_AN_NO_LINK;
638 } 646 }
639 647
640 xgbe_an73_disable(pdata); 648 xgbe_an_disable(pdata);
641 649
642 xgbe_switch_mode(pdata); 650 xgbe_switch_mode(pdata);
643 651
644 xgbe_an73_restart(pdata); 652 xgbe_an_restart(pdata);
645 653
646 return XGBE_AN_INCOMPAT_LINK; 654 return XGBE_AN_INCOMPAT_LINK;
647} 655}
@@ -820,6 +828,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
820 pdata->an_result = pdata->an_state; 828 pdata->an_result = pdata->an_state;
821 pdata->an_state = XGBE_AN_READY; 829 pdata->an_state = XGBE_AN_READY;
822 830
831 if (pdata->phy_if.phy_impl.an_post)
832 pdata->phy_if.phy_impl.an_post(pdata);
833
823 netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n", 834 netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n",
824 xgbe_state_as_string(pdata->an_result)); 835 xgbe_state_as_string(pdata->an_result));
825 } 836 }
@@ -903,6 +914,9 @@ again:
903 pdata->kx_state = XGBE_RX_BPA; 914 pdata->kx_state = XGBE_RX_BPA;
904 pdata->an_start = 0; 915 pdata->an_start = 0;
905 916
917 if (pdata->phy_if.phy_impl.an_post)
918 pdata->phy_if.phy_impl.an_post(pdata);
919
906 netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n", 920 netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n",
907 xgbe_state_as_string(pdata->an_result)); 921 xgbe_state_as_string(pdata->an_result));
908 } 922 }
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index eb23f9ba1a9a..82d1f416ee2a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = {
456 .irq_reissue_support = 1, 456 .irq_reissue_support = 1,
457 .tx_desc_prefetch = 5, 457 .tx_desc_prefetch = 5,
458 .rx_desc_prefetch = 5, 458 .rx_desc_prefetch = 5,
459 .an_cdr_workaround = 1,
459}; 460};
460 461
461static const struct xgbe_version_data xgbe_v2b = { 462static const struct xgbe_version_data xgbe_v2b = {
@@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = {
470 .irq_reissue_support = 1, 471 .irq_reissue_support = 1,
471 .tx_desc_prefetch = 5, 472 .tx_desc_prefetch = 5,
472 .rx_desc_prefetch = 5, 473 .rx_desc_prefetch = 5,
474 .an_cdr_workaround = 1,
473}; 475};
474 476
475static const struct pci_device_id xgbe_pci_table[] = { 477static const struct pci_device_id xgbe_pci_table[] = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 3304a291aa96..aac884314000 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -147,6 +147,14 @@
147/* Rate-change complete wait/retry count */ 147/* Rate-change complete wait/retry count */
148#define XGBE_RATECHANGE_COUNT 500 148#define XGBE_RATECHANGE_COUNT 500
149 149
150/* CDR delay values for KR support (in usec) */
151#define XGBE_CDR_DELAY_INIT 10000
152#define XGBE_CDR_DELAY_INC 10000
153#define XGBE_CDR_DELAY_MAX 100000
154
155/* RRC frequency during link status check */
156#define XGBE_RRC_FREQUENCY 10
157
150enum xgbe_port_mode { 158enum xgbe_port_mode {
151 XGBE_PORT_MODE_RSVD = 0, 159 XGBE_PORT_MODE_RSVD = 0,
152 XGBE_PORT_MODE_BACKPLANE, 160 XGBE_PORT_MODE_BACKPLANE,
@@ -245,6 +253,10 @@ enum xgbe_sfp_speed {
245#define XGBE_SFP_BASE_VENDOR_SN 4 253#define XGBE_SFP_BASE_VENDOR_SN 4
246#define XGBE_SFP_BASE_VENDOR_SN_LEN 16 254#define XGBE_SFP_BASE_VENDOR_SN_LEN 16
247 255
256#define XGBE_SFP_EXTD_OPT1 1
257#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1)
258#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3)
259
248#define XGBE_SFP_EXTD_DIAG 28 260#define XGBE_SFP_EXTD_DIAG 28
249#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2) 261#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
250 262
@@ -324,6 +336,7 @@ struct xgbe_phy_data {
324 336
325 unsigned int sfp_gpio_address; 337 unsigned int sfp_gpio_address;
326 unsigned int sfp_gpio_mask; 338 unsigned int sfp_gpio_mask;
339 unsigned int sfp_gpio_inputs;
327 unsigned int sfp_gpio_rx_los; 340 unsigned int sfp_gpio_rx_los;
328 unsigned int sfp_gpio_tx_fault; 341 unsigned int sfp_gpio_tx_fault;
329 unsigned int sfp_gpio_mod_absent; 342 unsigned int sfp_gpio_mod_absent;
@@ -355,6 +368,10 @@ struct xgbe_phy_data {
355 unsigned int redrv_addr; 368 unsigned int redrv_addr;
356 unsigned int redrv_lane; 369 unsigned int redrv_lane;
357 unsigned int redrv_model; 370 unsigned int redrv_model;
371
372 /* KR AN support */
373 unsigned int phy_cdr_notrack;
374 unsigned int phy_cdr_delay;
358}; 375};
359 376
360/* I2C, MDIO and GPIO lines are muxed, so only one device at a time */ 377/* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
@@ -974,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata)
974 phy_data->sfp_phy_avail = 1; 991 phy_data->sfp_phy_avail = 1;
975} 992}
976 993
994static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data)
995{
996 u8 *sfp_extd = phy_data->sfp_eeprom.extd;
997
998 if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS))
999 return false;
1000
1001 if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS)
1002 return false;
1003
1004 if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los))
1005 return true;
1006
1007 return false;
1008}
1009
1010static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data)
1011{
1012 u8 *sfp_extd = phy_data->sfp_eeprom.extd;
1013
1014 if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT))
1015 return false;
1016
1017 if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT)
1018 return false;
1019
1020 if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault))
1021 return true;
1022
1023 return false;
1024}
1025
1026static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
1027{
1028 if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT)
1029 return false;
1030
1031 if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent))
1032 return true;
1033
1034 return false;
1035}
1036
977static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata) 1037static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
978{ 1038{
979 struct xgbe_phy_data *phy_data = pdata->phy_data; 1039 struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -1019,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
1019 if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP) 1079 if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP)
1020 return; 1080 return;
1021 1081
1082 /* Update transceiver signals (eeprom extd/options) */
1083 phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
1084 phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
1085
1022 if (xgbe_phy_sfp_parse_quirks(pdata)) 1086 if (xgbe_phy_sfp_parse_quirks(pdata))
1023 return; 1087 return;
1024 1088
@@ -1184,7 +1248,6 @@ put:
1184static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) 1248static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
1185{ 1249{
1186 struct xgbe_phy_data *phy_data = pdata->phy_data; 1250 struct xgbe_phy_data *phy_data = pdata->phy_data;
1187 unsigned int gpio_input;
1188 u8 gpio_reg, gpio_ports[2]; 1251 u8 gpio_reg, gpio_ports[2];
1189 int ret; 1252 int ret;
1190 1253
@@ -1199,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
1199 return; 1262 return;
1200 } 1263 }
1201 1264
1202 gpio_input = (gpio_ports[1] << 8) | gpio_ports[0]; 1265 phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0];
1203
1204 if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) {
1205 /* No GPIO, just assume the module is present for now */
1206 phy_data->sfp_mod_absent = 0;
1207 } else {
1208 if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
1209 phy_data->sfp_mod_absent = 0;
1210 }
1211
1212 if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) &&
1213 (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
1214 phy_data->sfp_rx_los = 1;
1215 1266
1216 if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) && 1267 phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data);
1217 (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
1218 phy_data->sfp_tx_fault = 1;
1219} 1268}
1220 1269
1221static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata) 1270static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
@@ -2361,7 +2410,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
2361 return 1; 2410 return 1;
2362 2411
2363 /* No link, attempt a receiver reset cycle */ 2412 /* No link, attempt a receiver reset cycle */
2364 if (phy_data->rrc_count++) { 2413 if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
2365 phy_data->rrc_count = 0; 2414 phy_data->rrc_count = 0;
2366 xgbe_phy_rrc(pdata); 2415 xgbe_phy_rrc(pdata);
2367 } 2416 }
@@ -2669,6 +2718,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
2669 return true; 2718 return true;
2670} 2719}
2671 2720
2721static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata)
2722{
2723 struct xgbe_phy_data *phy_data = pdata->phy_data;
2724
2725 if (!pdata->debugfs_an_cdr_workaround)
2726 return;
2727
2728 if (!phy_data->phy_cdr_notrack)
2729 return;
2730
2731 usleep_range(phy_data->phy_cdr_delay,
2732 phy_data->phy_cdr_delay + 500);
2733
2734 XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
2735 XGBE_PMA_CDR_TRACK_EN_MASK,
2736 XGBE_PMA_CDR_TRACK_EN_ON);
2737
2738 phy_data->phy_cdr_notrack = 0;
2739}
2740
2741static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata)
2742{
2743 struct xgbe_phy_data *phy_data = pdata->phy_data;
2744
2745 if (!pdata->debugfs_an_cdr_workaround)
2746 return;
2747
2748 if (phy_data->phy_cdr_notrack)
2749 return;
2750
2751 XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
2752 XGBE_PMA_CDR_TRACK_EN_MASK,
2753 XGBE_PMA_CDR_TRACK_EN_OFF);
2754
2755 xgbe_phy_rrc(pdata);
2756
2757 phy_data->phy_cdr_notrack = 1;
2758}
2759
2760static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
2761{
2762 if (!pdata->debugfs_an_cdr_track_early)
2763 xgbe_phy_cdr_track(pdata);
2764}
2765
2766static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
2767{
2768 if (pdata->debugfs_an_cdr_track_early)
2769 xgbe_phy_cdr_track(pdata);
2770}
2771
2772static void xgbe_phy_an_post(struct xgbe_prv_data *pdata)
2773{
2774 struct xgbe_phy_data *phy_data = pdata->phy_data;
2775
2776 switch (pdata->an_mode) {
2777 case XGBE_AN_MODE_CL73:
2778 case XGBE_AN_MODE_CL73_REDRV:
2779 if (phy_data->cur_mode != XGBE_MODE_KR)
2780 break;
2781
2782 xgbe_phy_cdr_track(pdata);
2783
2784 switch (pdata->an_result) {
2785 case XGBE_AN_READY:
2786 case XGBE_AN_COMPLETE:
2787 break;
2788 default:
2789 if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX)
2790 phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC;
2791 else
2792 phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
2793 break;
2794 }
2795 break;
2796 default:
2797 break;
2798 }
2799}
2800
2801static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata)
2802{
2803 struct xgbe_phy_data *phy_data = pdata->phy_data;
2804
2805 switch (pdata->an_mode) {
2806 case XGBE_AN_MODE_CL73:
2807 case XGBE_AN_MODE_CL73_REDRV:
2808 if (phy_data->cur_mode != XGBE_MODE_KR)
2809 break;
2810
2811 xgbe_phy_cdr_notrack(pdata);
2812 break;
2813 default:
2814 break;
2815 }
2816}
2817
2672static void xgbe_phy_stop(struct xgbe_prv_data *pdata) 2818static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
2673{ 2819{
2674 struct xgbe_phy_data *phy_data = pdata->phy_data; 2820 struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2680,6 +2826,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
2680 xgbe_phy_sfp_reset(phy_data); 2826 xgbe_phy_sfp_reset(phy_data);
2681 xgbe_phy_sfp_mod_absent(pdata); 2827 xgbe_phy_sfp_mod_absent(pdata);
2682 2828
2829 /* Reset CDR support */
2830 xgbe_phy_cdr_track(pdata);
2831
2683 /* Power off the PHY */ 2832 /* Power off the PHY */
2684 xgbe_phy_power_off(pdata); 2833 xgbe_phy_power_off(pdata);
2685 2834
@@ -2712,6 +2861,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
2712 /* Start in highest supported mode */ 2861 /* Start in highest supported mode */
2713 xgbe_phy_set_mode(pdata, phy_data->start_mode); 2862 xgbe_phy_set_mode(pdata, phy_data->start_mode);
2714 2863
2864 /* Reset CDR support */
2865 xgbe_phy_cdr_track(pdata);
2866
2715 /* After starting the I2C controller, we can check for an SFP */ 2867 /* After starting the I2C controller, we can check for an SFP */
2716 switch (phy_data->port_mode) { 2868 switch (phy_data->port_mode) {
2717 case XGBE_PORT_MODE_SFP: 2869 case XGBE_PORT_MODE_SFP:
@@ -3019,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
3019 } 3171 }
3020 } 3172 }
3021 3173
3174 phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
3175
3022 /* Register for driving external PHYs */ 3176 /* Register for driving external PHYs */
3023 mii = devm_mdiobus_alloc(pdata->dev); 3177 mii = devm_mdiobus_alloc(pdata->dev);
3024 if (!mii) { 3178 if (!mii) {
@@ -3071,4 +3225,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
3071 phy_impl->an_advertising = xgbe_phy_an_advertising; 3225 phy_impl->an_advertising = xgbe_phy_an_advertising;
3072 3226
3073 phy_impl->an_outcome = xgbe_phy_an_outcome; 3227 phy_impl->an_outcome = xgbe_phy_an_outcome;
3228
3229 phy_impl->an_pre = xgbe_phy_an_pre;
3230 phy_impl->an_post = xgbe_phy_an_post;
3231
3232 phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
3233 phy_impl->kr_training_post = xgbe_phy_kr_training_post;
3074} 3234}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index ad102c8bac7b..95d4b56448c6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -833,6 +833,7 @@ struct xgbe_hw_if {
833/* This structure represents implementation specific routines for an 833/* This structure represents implementation specific routines for an
834 * implementation of a PHY. All routines are required unless noted below. 834 * implementation of a PHY. All routines are required unless noted below.
835 * Optional routines: 835 * Optional routines:
836 * an_pre, an_post
836 * kr_training_pre, kr_training_post 837 * kr_training_pre, kr_training_post
837 */ 838 */
838struct xgbe_phy_impl_if { 839struct xgbe_phy_impl_if {
@@ -875,6 +876,10 @@ struct xgbe_phy_impl_if {
875 /* Process results of auto-negotiation */ 876 /* Process results of auto-negotiation */
876 enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *); 877 enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
877 878
879 /* Pre/Post auto-negotiation support */
880 void (*an_pre)(struct xgbe_prv_data *);
881 void (*an_post)(struct xgbe_prv_data *);
882
878 /* Pre/Post KR training enablement support */ 883 /* Pre/Post KR training enablement support */
879 void (*kr_training_pre)(struct xgbe_prv_data *); 884 void (*kr_training_pre)(struct xgbe_prv_data *);
880 void (*kr_training_post)(struct xgbe_prv_data *); 885 void (*kr_training_post)(struct xgbe_prv_data *);
@@ -989,6 +994,7 @@ struct xgbe_version_data {
989 unsigned int irq_reissue_support; 994 unsigned int irq_reissue_support;
990 unsigned int tx_desc_prefetch; 995 unsigned int tx_desc_prefetch;
991 unsigned int rx_desc_prefetch; 996 unsigned int rx_desc_prefetch;
997 unsigned int an_cdr_workaround;
992}; 998};
993 999
994struct xgbe_vxlan_data { 1000struct xgbe_vxlan_data {
@@ -1257,6 +1263,9 @@ struct xgbe_prv_data {
1257 unsigned int debugfs_xprop_reg; 1263 unsigned int debugfs_xprop_reg;
1258 1264
1259 unsigned int debugfs_xi2c_reg; 1265 unsigned int debugfs_xi2c_reg;
1266
1267 bool debugfs_an_cdr_workaround;
1268 bool debugfs_an_cdr_track_early;
1260}; 1269};
1261 1270
1262/* Function prototypes*/ 1271/* Function prototypes*/
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 2df01ad98df7..6e8d6a6f6aaf 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1128,7 +1128,7 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1128 if (!adapter->rx_pool) 1128 if (!adapter->rx_pool)
1129 return; 1129 return;
1130 1130
1131 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 1131 rx_scrqs = adapter->num_active_rx_pools;
1132 rx_entries = adapter->req_rx_add_entries_per_subcrq; 1132 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1133 1133
1134 /* Free any remaining skbs in the rx buffer pools */ 1134 /* Free any remaining skbs in the rx buffer pools */
@@ -1177,7 +1177,7 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1177 if (!adapter->tx_pool || !adapter->tso_pool) 1177 if (!adapter->tx_pool || !adapter->tso_pool)
1178 return; 1178 return;
1179 1179
1180 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 1180 tx_scrqs = adapter->num_active_tx_pools;
1181 1181
1182 /* Free any remaining skbs in the tx buffer pools */ 1182 /* Free any remaining skbs in the tx buffer pools */
1183 for (i = 0; i < tx_scrqs; i++) { 1183 for (i = 0; i < tx_scrqs; i++) {
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 5b13ca1bd85f..7dc5f045e969 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -586,7 +586,7 @@ struct ice_sw_rule_lg_act {
586#define ICE_LG_ACT_MIRROR_VSI_ID_S 3 586#define ICE_LG_ACT_MIRROR_VSI_ID_S 3
587#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S) 587#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S)
588 588
589 /* Action type = 5 - Large Action */ 589 /* Action type = 5 - Generic Value */
590#define ICE_LG_ACT_GENERIC 0x5 590#define ICE_LG_ACT_GENERIC 0x5
591#define ICE_LG_ACT_GENERIC_VALUE_S 3 591#define ICE_LG_ACT_GENERIC_VALUE_S 3
592#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S) 592#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S)
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 21977ec984c4..71d032cc5fa7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -78,6 +78,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
78 struct ice_aq_desc desc; 78 struct ice_aq_desc desc;
79 enum ice_status status; 79 enum ice_status status;
80 u16 flags; 80 u16 flags;
81 u8 i;
81 82
82 cmd = &desc.params.mac_read; 83 cmd = &desc.params.mac_read;
83 84
@@ -98,8 +99,16 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
98 return ICE_ERR_CFG; 99 return ICE_ERR_CFG;
99 } 100 }
100 101
101 ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr); 102 /* A single port can report up to two (LAN and WoL) addresses */
102 ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr); 103 for (i = 0; i < cmd->num_addr; i++)
104 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
105 ether_addr_copy(hw->port_info->mac.lan_addr,
106 resp[i].mac_addr);
107 ether_addr_copy(hw->port_info->mac.perm_addr,
108 resp[i].mac_addr);
109 break;
110 }
111
103 return 0; 112 return 0;
104} 113}
105 114
@@ -464,9 +473,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
464 if (status) 473 if (status)
465 goto err_unroll_sched; 474 goto err_unroll_sched;
466 475
467 /* Get port MAC information */ 476 /* Get MAC information */
468 mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp); 477 /* A single port can report up to two (LAN and WoL) addresses */
469 mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL); 478 mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
479 sizeof(struct ice_aqc_manage_mac_read_resp),
480 GFP_KERNEL);
481 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
470 482
471 if (!mac_buf) { 483 if (!mac_buf) {
472 status = ICE_ERR_NO_MEMORY; 484 status = ICE_ERR_NO_MEMORY;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 1b9e2ef48a9d..499904874b3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -121,8 +121,6 @@
121#define PFINT_FW_CTL_CAUSE_ENA_S 30 121#define PFINT_FW_CTL_CAUSE_ENA_S 30
122#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) 122#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
123#define PFINT_OICR 0x0016CA00 123#define PFINT_OICR 0x0016CA00
124#define PFINT_OICR_INTEVENT_S 0
125#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S)
126#define PFINT_OICR_HLP_RDY_S 14 124#define PFINT_OICR_HLP_RDY_S 14
127#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S) 125#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
128#define PFINT_OICR_CPM_RDY_S 15 126#define PFINT_OICR_CPM_RDY_S 15
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 210b7910f1cd..5299caf55a7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1722,9 +1722,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
1722 oicr = rd32(hw, PFINT_OICR); 1722 oicr = rd32(hw, PFINT_OICR);
1723 ena_mask = rd32(hw, PFINT_OICR_ENA); 1723 ena_mask = rd32(hw, PFINT_OICR_ENA);
1724 1724
1725 if (!(oicr & PFINT_OICR_INTEVENT_M))
1726 goto ena_intr;
1727
1728 if (oicr & PFINT_OICR_GRST_M) { 1725 if (oicr & PFINT_OICR_GRST_M) {
1729 u32 reset; 1726 u32 reset;
1730 /* we have a reset warning */ 1727 /* we have a reset warning */
@@ -1782,7 +1779,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
1782 } 1779 }
1783 ret = IRQ_HANDLED; 1780 ret = IRQ_HANDLED;
1784 1781
1785ena_intr:
1786 /* re-enable interrupt causes that are not handled during this pass */ 1782 /* re-enable interrupt causes that are not handled during this pass */
1787 wr32(hw, PFINT_OICR_ENA, ena_mask); 1783 wr32(hw, PFINT_OICR_ENA, ena_mask);
1788 if (!test_bit(__ICE_DOWN, pf->state)) { 1784 if (!test_bit(__ICE_DOWN, pf->state)) {
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index f16ff3e4a840..2e6c1d92cc88 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -751,14 +751,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
751 u16 num_added = 0; 751 u16 num_added = 0;
752 u32 temp; 752 u32 temp;
753 753
754 *num_nodes_added = 0;
755
754 if (!num_nodes) 756 if (!num_nodes)
755 return status; 757 return status;
756 758
757 if (!parent || layer < hw->sw_entry_point_layer) 759 if (!parent || layer < hw->sw_entry_point_layer)
758 return ICE_ERR_PARAM; 760 return ICE_ERR_PARAM;
759 761
760 *num_nodes_added = 0;
761
762 /* max children per node per layer */ 762 /* max children per node per layer */
763 max_child_nodes = 763 max_child_nodes =
764 le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children); 764 le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c1c0bc30a16d..cce7ada89255 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1700,7 +1700,22 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
1700 WARN_ON(hw->mac.type != e1000_i210); 1700 WARN_ON(hw->mac.type != e1000_i210);
1701 WARN_ON(queue < 0 || queue > 1); 1701 WARN_ON(queue < 0 || queue > 1);
1702 1702
1703 if (enable) { 1703 if (enable || queue == 0) {
1704 /* i210 does not allow the queue 0 to be in the Strict
1705 * Priority mode while the Qav mode is enabled, so,
1706 * instead of disabling strict priority mode, we give
1707 * queue 0 the maximum of credits possible.
1708 *
1709 * See section 8.12.19 of the i210 datasheet, "Note:
1710 * Queue0 QueueMode must be set to 1b when
1711 * TransmitMode is set to Qav."
1712 */
1713 if (queue == 0 && !enable) {
1714 /* max "linkspeed" idleslope in kbps */
1715 idleslope = 1000000;
1716 hicredit = ETH_FRAME_LEN;
1717 }
1718
1704 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); 1719 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1705 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); 1720 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1706 1721
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 3d9033f26eff..e3d04f226d57 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3420,7 +3420,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3420 if (!err) 3420 if (!err)
3421 continue; 3421 continue;
3422 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j); 3422 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
3423 break; 3423 goto err_setup_tx;
3424 } 3424 }
3425 3425
3426 return 0; 3426 return 0;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 83ce229f4eb7..63036d9bf3e6 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3999,29 +3999,6 @@ static void efx_ef10_prepare_flr(struct efx_nic *efx)
3999 atomic_set(&efx->active_queues, 0); 3999 atomic_set(&efx->active_queues, 0);
4000} 4000}
4001 4001
4002static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
4003 const struct efx_filter_spec *right)
4004{
4005 if ((left->match_flags ^ right->match_flags) |
4006 ((left->flags ^ right->flags) &
4007 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
4008 return false;
4009
4010 return memcmp(&left->outer_vid, &right->outer_vid,
4011 sizeof(struct efx_filter_spec) -
4012 offsetof(struct efx_filter_spec, outer_vid)) == 0;
4013}
4014
4015static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
4016{
4017 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
4018 return jhash2((const u32 *)&spec->outer_vid,
4019 (sizeof(struct efx_filter_spec) -
4020 offsetof(struct efx_filter_spec, outer_vid)) / 4,
4021 0);
4022 /* XXX should we randomise the initval? */
4023}
4024
4025/* Decide whether a filter should be exclusive or else should allow 4002/* Decide whether a filter should be exclusive or else should allow
4026 * delivery to additional recipients. Currently we decide that 4003 * delivery to additional recipients. Currently we decide that
4027 * filters for specific local unicast MAC and IP addresses are 4004 * filters for specific local unicast MAC and IP addresses are
@@ -4346,7 +4323,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4346 goto out_unlock; 4323 goto out_unlock;
4347 match_pri = rc; 4324 match_pri = rc;
4348 4325
4349 hash = efx_ef10_filter_hash(spec); 4326 hash = efx_filter_spec_hash(spec);
4350 is_mc_recip = efx_filter_is_mc_recipient(spec); 4327 is_mc_recip = efx_filter_is_mc_recipient(spec);
4351 if (is_mc_recip) 4328 if (is_mc_recip)
4352 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 4329 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
@@ -4378,7 +4355,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4378 if (!saved_spec) { 4355 if (!saved_spec) {
4379 if (ins_index < 0) 4356 if (ins_index < 0)
4380 ins_index = i; 4357 ins_index = i;
4381 } else if (efx_ef10_filter_equal(spec, saved_spec)) { 4358 } else if (efx_filter_spec_equal(spec, saved_spec)) {
4382 if (spec->priority < saved_spec->priority && 4359 if (spec->priority < saved_spec->priority &&
4383 spec->priority != EFX_FILTER_PRI_AUTO) { 4360 spec->priority != EFX_FILTER_PRI_AUTO) {
4384 rc = -EPERM; 4361 rc = -EPERM;
@@ -4762,27 +4739,62 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
4762static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, 4739static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
4763 unsigned int filter_idx) 4740 unsigned int filter_idx)
4764{ 4741{
4742 struct efx_filter_spec *spec, saved_spec;
4765 struct efx_ef10_filter_table *table; 4743 struct efx_ef10_filter_table *table;
4766 struct efx_filter_spec *spec; 4744 struct efx_arfs_rule *rule = NULL;
4767 bool ret; 4745 bool ret = true, force = false;
4746 u16 arfs_id;
4768 4747
4769 down_read(&efx->filter_sem); 4748 down_read(&efx->filter_sem);
4770 table = efx->filter_state; 4749 table = efx->filter_state;
4771 down_write(&table->lock); 4750 down_write(&table->lock);
4772 spec = efx_ef10_filter_entry_spec(table, filter_idx); 4751 spec = efx_ef10_filter_entry_spec(table, filter_idx);
4773 4752
4774 if (!spec || spec->priority != EFX_FILTER_PRI_HINT) { 4753 if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
4775 ret = true;
4776 goto out_unlock; 4754 goto out_unlock;
4777 }
4778 4755
4779 if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, 0)) { 4756 spin_lock_bh(&efx->rps_hash_lock);
4780 ret = false; 4757 if (!efx->rps_hash_table) {
4781 goto out_unlock; 4758 /* In the absence of the table, we always return 0 to ARFS. */
4759 arfs_id = 0;
4760 } else {
4761 rule = efx_rps_hash_find(efx, spec);
4762 if (!rule)
4763 /* ARFS table doesn't know of this filter, so remove it */
4764 goto expire;
4765 arfs_id = rule->arfs_id;
4766 ret = efx_rps_check_rule(rule, filter_idx, &force);
4767 if (force)
4768 goto expire;
4769 if (!ret) {
4770 spin_unlock_bh(&efx->rps_hash_lock);
4771 goto out_unlock;
4772 }
4782 } 4773 }
4783 4774 if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
4775 ret = false;
4776 else if (rule)
4777 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
4778expire:
4779 saved_spec = *spec; /* remove operation will kfree spec */
4780 spin_unlock_bh(&efx->rps_hash_lock);
4781 /* At this point (since we dropped the lock), another thread might queue
4782 * up a fresh insertion request (but the actual insertion will be held
4783 * up by our possession of the filter table lock). In that case, it
4784 * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
4785 * the rule is not removed by efx_rps_hash_del() below.
4786 */
4784 ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, 4787 ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
4785 filter_idx, true) == 0; 4788 filter_idx, true) == 0;
4789 /* While we can't safely dereference rule (we dropped the lock), we can
4790 * still test it for NULL.
4791 */
4792 if (ret && rule) {
4793 /* Expiring, so remove entry from ARFS table */
4794 spin_lock_bh(&efx->rps_hash_lock);
4795 efx_rps_hash_del(efx, &saved_spec);
4796 spin_unlock_bh(&efx->rps_hash_lock);
4797 }
4786out_unlock: 4798out_unlock:
4787 up_write(&table->lock); 4799 up_write(&table->lock);
4788 up_read(&efx->filter_sem); 4800 up_read(&efx->filter_sem);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 692dd729ee2a..a4ebd8715494 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3027,6 +3027,10 @@ static int efx_init_struct(struct efx_nic *efx,
3027 mutex_init(&efx->mac_lock); 3027 mutex_init(&efx->mac_lock);
3028#ifdef CONFIG_RFS_ACCEL 3028#ifdef CONFIG_RFS_ACCEL
3029 mutex_init(&efx->rps_mutex); 3029 mutex_init(&efx->rps_mutex);
3030 spin_lock_init(&efx->rps_hash_lock);
3031 /* Failure to allocate is not fatal, but may degrade ARFS performance */
3032 efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
3033 sizeof(*efx->rps_hash_table), GFP_KERNEL);
3030#endif 3034#endif
3031 efx->phy_op = &efx_dummy_phy_operations; 3035 efx->phy_op = &efx_dummy_phy_operations;
3032 efx->mdio.dev = net_dev; 3036 efx->mdio.dev = net_dev;
@@ -3070,6 +3074,10 @@ static void efx_fini_struct(struct efx_nic *efx)
3070{ 3074{
3071 int i; 3075 int i;
3072 3076
3077#ifdef CONFIG_RFS_ACCEL
3078 kfree(efx->rps_hash_table);
3079#endif
3080
3073 for (i = 0; i < EFX_MAX_CHANNELS; i++) 3081 for (i = 0; i < EFX_MAX_CHANNELS; i++)
3074 kfree(efx->channel[i]); 3082 kfree(efx->channel[i]);
3075 3083
@@ -3092,6 +3100,141 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
3092 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); 3100 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
3093} 3101}
3094 3102
3103bool efx_filter_spec_equal(const struct efx_filter_spec *left,
3104 const struct efx_filter_spec *right)
3105{
3106 if ((left->match_flags ^ right->match_flags) |
3107 ((left->flags ^ right->flags) &
3108 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
3109 return false;
3110
3111 return memcmp(&left->outer_vid, &right->outer_vid,
3112 sizeof(struct efx_filter_spec) -
3113 offsetof(struct efx_filter_spec, outer_vid)) == 0;
3114}
3115
3116u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
3117{
3118 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
3119 return jhash2((const u32 *)&spec->outer_vid,
3120 (sizeof(struct efx_filter_spec) -
3121 offsetof(struct efx_filter_spec, outer_vid)) / 4,
3122 0);
3123}
3124
3125#ifdef CONFIG_RFS_ACCEL
3126bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
3127 bool *force)
3128{
3129 if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
3130 /* ARFS is currently updating this entry, leave it */
3131 return false;
3132 }
3133 if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
3134 /* ARFS tried and failed to update this, so it's probably out
3135 * of date. Remove the filter and the ARFS rule entry.
3136 */
3137 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
3138 *force = true;
3139 return true;
3140 } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
3141 /* ARFS has moved on, so old filter is not needed. Since we did
3142 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
3143 * not be removed by efx_rps_hash_del() subsequently.
3144 */
3145 *force = true;
3146 return true;
3147 }
3148 /* Remove it iff ARFS wants to. */
3149 return true;
3150}
3151
3152struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
3153 const struct efx_filter_spec *spec)
3154{
3155 u32 hash = efx_filter_spec_hash(spec);
3156
3157 WARN_ON(!spin_is_locked(&efx->rps_hash_lock));
3158 if (!efx->rps_hash_table)
3159 return NULL;
3160 return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
3161}
3162
3163struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
3164 const struct efx_filter_spec *spec)
3165{
3166 struct efx_arfs_rule *rule;
3167 struct hlist_head *head;
3168 struct hlist_node *node;
3169
3170 head = efx_rps_hash_bucket(efx, spec);
3171 if (!head)
3172 return NULL;
3173 hlist_for_each(node, head) {
3174 rule = container_of(node, struct efx_arfs_rule, node);
3175 if (efx_filter_spec_equal(spec, &rule->spec))
3176 return rule;
3177 }
3178 return NULL;
3179}
3180
3181struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
3182 const struct efx_filter_spec *spec,
3183 bool *new)
3184{
3185 struct efx_arfs_rule *rule;
3186 struct hlist_head *head;
3187 struct hlist_node *node;
3188
3189 head = efx_rps_hash_bucket(efx, spec);
3190 if (!head)
3191 return NULL;
3192 hlist_for_each(node, head) {
3193 rule = container_of(node, struct efx_arfs_rule, node);
3194 if (efx_filter_spec_equal(spec, &rule->spec)) {
3195 *new = false;
3196 return rule;
3197 }
3198 }
3199 rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
3200 *new = true;
3201 if (rule) {
3202 memcpy(&rule->spec, spec, sizeof(rule->spec));
3203 hlist_add_head(&rule->node, head);
3204 }
3205 return rule;
3206}
3207
3208void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
3209{
3210 struct efx_arfs_rule *rule;
3211 struct hlist_head *head;
3212 struct hlist_node *node;
3213
3214 head = efx_rps_hash_bucket(efx, spec);
3215 if (WARN_ON(!head))
3216 return;
3217 hlist_for_each(node, head) {
3218 rule = container_of(node, struct efx_arfs_rule, node);
3219 if (efx_filter_spec_equal(spec, &rule->spec)) {
3220 /* Someone already reused the entry. We know that if
3221 * this check doesn't fire (i.e. filter_id == REMOVING)
3222 * then the REMOVING mark was put there by our caller,
3223 * because caller is holding a lock on filter table and
3224 * only holders of that lock set REMOVING.
3225 */
3226 if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
3227 return;
3228 hlist_del(node);
3229 kfree(rule);
3230 return;
3231 }
3232 }
3233 /* We didn't find it. */
3234 WARN_ON(1);
3235}
3236#endif
3237
3095/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because 3238/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
3096 * (a) this is an infrequent control-plane operation and (b) n is small (max 64) 3239 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
3097 */ 3240 */
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index a3140e16fcef..3f759ebdcf10 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -186,6 +186,27 @@ static inline void efx_filter_rfs_expire(struct work_struct *data) {}
186#endif 186#endif
187bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); 187bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
188 188
189bool efx_filter_spec_equal(const struct efx_filter_spec *left,
190 const struct efx_filter_spec *right);
191u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
192
193#ifdef CONFIG_RFS_ACCEL
194bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
195 bool *force);
196
197struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
198 const struct efx_filter_spec *spec);
199
200/* @new is written to indicate if entry was newly added (true) or if an old
201 * entry was found and returned (false).
202 */
203struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
204 const struct efx_filter_spec *spec,
205 bool *new);
206
207void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
208#endif
209
189/* RSS contexts */ 210/* RSS contexts */
190struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); 211struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
191struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); 212struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 7174ef5e5c5e..c72adf8b52ea 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2905,18 +2905,45 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2905{ 2905{
2906 struct efx_farch_filter_state *state = efx->filter_state; 2906 struct efx_farch_filter_state *state = efx->filter_state;
2907 struct efx_farch_filter_table *table; 2907 struct efx_farch_filter_table *table;
2908 bool ret = false; 2908 bool ret = false, force = false;
2909 u16 arfs_id;
2909 2910
2910 down_write(&state->lock); 2911 down_write(&state->lock);
2912 spin_lock_bh(&efx->rps_hash_lock);
2911 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; 2913 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2912 if (test_bit(index, table->used_bitmap) && 2914 if (test_bit(index, table->used_bitmap) &&
2913 table->spec[index].priority == EFX_FILTER_PRI_HINT && 2915 table->spec[index].priority == EFX_FILTER_PRI_HINT) {
2914 rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, 2916 struct efx_arfs_rule *rule = NULL;
2915 flow_id, 0)) { 2917 struct efx_filter_spec spec;
2916 efx_farch_filter_table_clear_entry(efx, table, index); 2918
2917 ret = true; 2919 efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
2920 if (!efx->rps_hash_table) {
2921 /* In the absence of the table, we always returned 0 to
2922 * ARFS, so use the same to query it.
2923 */
2924 arfs_id = 0;
2925 } else {
2926 rule = efx_rps_hash_find(efx, &spec);
2927 if (!rule) {
2928 /* ARFS table doesn't know of this filter, remove it */
2929 force = true;
2930 } else {
2931 arfs_id = rule->arfs_id;
2932 if (!efx_rps_check_rule(rule, index, &force))
2933 goto out_unlock;
2934 }
2935 }
2936 if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
2937 flow_id, arfs_id)) {
2938 if (rule)
2939 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
2940 efx_rps_hash_del(efx, &spec);
2941 efx_farch_filter_table_clear_entry(efx, table, index);
2942 ret = true;
2943 }
2918 } 2944 }
2919 2945out_unlock:
2946 spin_unlock_bh(&efx->rps_hash_lock);
2920 up_write(&state->lock); 2947 up_write(&state->lock);
2921 return ret; 2948 return ret;
2922} 2949}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index eea3808b3f25..65568925c3ef 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -734,6 +734,35 @@ struct efx_rss_context {
734}; 734};
735 735
736#ifdef CONFIG_RFS_ACCEL 736#ifdef CONFIG_RFS_ACCEL
737/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING
738 * is used to test if filter does or will exist.
739 */
740#define EFX_ARFS_FILTER_ID_PENDING -1
741#define EFX_ARFS_FILTER_ID_ERROR -2
742#define EFX_ARFS_FILTER_ID_REMOVING -3
743/**
744 * struct efx_arfs_rule - record of an ARFS filter and its IDs
745 * @node: linkage into hash table
746 * @spec: details of the filter (used as key for hash table). Use efx->type to
747 * determine which member to use.
748 * @rxq_index: channel to which the filter will steer traffic.
749 * @arfs_id: filter ID which was returned to ARFS
750 * @filter_id: index in software filter table. May be
751 * %EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet,
752 * %EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or
753 * %EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter.
754 */
755struct efx_arfs_rule {
756 struct hlist_node node;
757 struct efx_filter_spec spec;
758 u16 rxq_index;
759 u16 arfs_id;
760 s32 filter_id;
761};
762
763/* Size chosen so that the table is one page (4kB) */
764#define EFX_ARFS_HASH_TABLE_SIZE 512
765
737/** 766/**
738 * struct efx_async_filter_insertion - Request to asynchronously insert a filter 767 * struct efx_async_filter_insertion - Request to asynchronously insert a filter
739 * @net_dev: Reference to the netdevice 768 * @net_dev: Reference to the netdevice
@@ -873,6 +902,10 @@ struct efx_async_filter_insertion {
873 * @rps_expire_channel's @rps_flow_id 902 * @rps_expire_channel's @rps_flow_id
874 * @rps_slot_map: bitmap of in-flight entries in @rps_slot 903 * @rps_slot_map: bitmap of in-flight entries in @rps_slot
875 * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work() 904 * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work()
905 * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and
906 * @rps_next_id).
907 * @rps_hash_table: Mapping between ARFS filters and their various IDs
908 * @rps_next_id: next arfs_id for an ARFS filter
876 * @active_queues: Count of RX and TX queues that haven't been flushed and drained. 909 * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
877 * @rxq_flush_pending: Count of number of receive queues that need to be flushed. 910 * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
878 * Decremented when the efx_flush_rx_queue() is called. 911 * Decremented when the efx_flush_rx_queue() is called.
@@ -1029,6 +1062,9 @@ struct efx_nic {
1029 unsigned int rps_expire_index; 1062 unsigned int rps_expire_index;
1030 unsigned long rps_slot_map; 1063 unsigned long rps_slot_map;
1031 struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT]; 1064 struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT];
1065 spinlock_t rps_hash_lock;
1066 struct hlist_head *rps_hash_table;
1067 u32 rps_next_id;
1032#endif 1068#endif
1033 1069
1034 atomic_t active_queues; 1070 atomic_t active_queues;
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 9c593c661cbf..64a94f242027 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -834,9 +834,29 @@ static void efx_filter_rfs_work(struct work_struct *data)
834 struct efx_nic *efx = netdev_priv(req->net_dev); 834 struct efx_nic *efx = netdev_priv(req->net_dev);
835 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); 835 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
836 int slot_idx = req - efx->rps_slot; 836 int slot_idx = req - efx->rps_slot;
837 struct efx_arfs_rule *rule;
838 u16 arfs_id = 0;
837 int rc; 839 int rc;
838 840
839 rc = efx->type->filter_insert(efx, &req->spec, true); 841 rc = efx->type->filter_insert(efx, &req->spec, true);
842 if (efx->rps_hash_table) {
843 spin_lock_bh(&efx->rps_hash_lock);
844 rule = efx_rps_hash_find(efx, &req->spec);
845 /* The rule might have already gone, if someone else's request
846 * for the same spec was already worked and then expired before
847 * we got around to our work. In that case we have nothing
848 * tying us to an arfs_id, meaning that as soon as the filter
849 * is considered for expiry it will be removed.
850 */
851 if (rule) {
852 if (rc < 0)
853 rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
854 else
855 rule->filter_id = rc;
856 arfs_id = rule->arfs_id;
857 }
858 spin_unlock_bh(&efx->rps_hash_lock);
859 }
840 if (rc >= 0) { 860 if (rc >= 0) {
841 /* Remember this so we can check whether to expire the filter 861 /* Remember this so we can check whether to expire the filter
842 * later. 862 * later.
@@ -848,18 +868,18 @@ static void efx_filter_rfs_work(struct work_struct *data)
848 868
849 if (req->spec.ether_type == htons(ETH_P_IP)) 869 if (req->spec.ether_type == htons(ETH_P_IP))
850 netif_info(efx, rx_status, efx->net_dev, 870 netif_info(efx, rx_status, efx->net_dev,
851 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", 871 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
852 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 872 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
853 req->spec.rem_host, ntohs(req->spec.rem_port), 873 req->spec.rem_host, ntohs(req->spec.rem_port),
854 req->spec.loc_host, ntohs(req->spec.loc_port), 874 req->spec.loc_host, ntohs(req->spec.loc_port),
855 req->rxq_index, req->flow_id, rc); 875 req->rxq_index, req->flow_id, rc, arfs_id);
856 else 876 else
857 netif_info(efx, rx_status, efx->net_dev, 877 netif_info(efx, rx_status, efx->net_dev,
858 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", 878 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
859 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 879 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
860 req->spec.rem_host, ntohs(req->spec.rem_port), 880 req->spec.rem_host, ntohs(req->spec.rem_port),
861 req->spec.loc_host, ntohs(req->spec.loc_port), 881 req->spec.loc_host, ntohs(req->spec.loc_port),
862 req->rxq_index, req->flow_id, rc); 882 req->rxq_index, req->flow_id, rc, arfs_id);
863 } 883 }
864 884
865 /* Release references */ 885 /* Release references */
@@ -872,8 +892,10 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
872{ 892{
873 struct efx_nic *efx = netdev_priv(net_dev); 893 struct efx_nic *efx = netdev_priv(net_dev);
874 struct efx_async_filter_insertion *req; 894 struct efx_async_filter_insertion *req;
895 struct efx_arfs_rule *rule;
875 struct flow_keys fk; 896 struct flow_keys fk;
876 int slot_idx; 897 int slot_idx;
898 bool new;
877 int rc; 899 int rc;
878 900
879 /* find a free slot */ 901 /* find a free slot */
@@ -926,12 +948,42 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
926 req->spec.rem_port = fk.ports.src; 948 req->spec.rem_port = fk.ports.src;
927 req->spec.loc_port = fk.ports.dst; 949 req->spec.loc_port = fk.ports.dst;
928 950
951 if (efx->rps_hash_table) {
952 /* Add it to ARFS hash table */
953 spin_lock(&efx->rps_hash_lock);
954 rule = efx_rps_hash_add(efx, &req->spec, &new);
955 if (!rule) {
956 rc = -ENOMEM;
957 goto out_unlock;
958 }
959 if (new)
960 rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
961 rc = rule->arfs_id;
962 /* Skip if existing or pending filter already does the right thing */
963 if (!new && rule->rxq_index == rxq_index &&
964 rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
965 goto out_unlock;
966 rule->rxq_index = rxq_index;
967 rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
968 spin_unlock(&efx->rps_hash_lock);
969 } else {
970 /* Without an ARFS hash table, we just use arfs_id 0 for all
971 * filters. This means if multiple flows hash to the same
972 * flow_id, all but the most recently touched will be eligible
973 * for expiry.
974 */
975 rc = 0;
976 }
977
978 /* Queue the request */
929 dev_hold(req->net_dev = net_dev); 979 dev_hold(req->net_dev = net_dev);
930 INIT_WORK(&req->work, efx_filter_rfs_work); 980 INIT_WORK(&req->work, efx_filter_rfs_work);
931 req->rxq_index = rxq_index; 981 req->rxq_index = rxq_index;
932 req->flow_id = flow_id; 982 req->flow_id = flow_id;
933 schedule_work(&req->work); 983 schedule_work(&req->work);
934 return 0; 984 return rc;
985out_unlock:
986 spin_unlock(&efx->rps_hash_lock);
935out_clear: 987out_clear:
936 clear_bit(slot_idx, &efx->rps_slot_map); 988 clear_bit(slot_idx, &efx->rps_slot_map);
937 return rc; 989 return rc;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 30371274409d..74f828412055 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -129,7 +129,7 @@ do { \
129 129
130#define RX_PRIORITY_MAPPING 0x76543210 130#define RX_PRIORITY_MAPPING 0x76543210
131#define TX_PRIORITY_MAPPING 0x33221100 131#define TX_PRIORITY_MAPPING 0x33221100
132#define CPDMA_TX_PRIORITY_MAP 0x01234567 132#define CPDMA_TX_PRIORITY_MAP 0x76543210
133 133
134#define CPSW_VLAN_AWARE BIT(1) 134#define CPSW_VLAN_AWARE BIT(1)
135#define CPSW_RX_VLAN_ENCAP BIT(2) 135#define CPSW_RX_VLAN_ENCAP BIT(2)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index c22e8e383247..25e2a099b71c 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1393,6 +1393,15 @@ static int m88e1318_set_wol(struct phy_device *phydev,
1393 if (err < 0) 1393 if (err < 0)
1394 goto error; 1394 goto error;
1395 1395
1396 /* If WOL event happened once, the LED[2] interrupt pin
 1397 * will not be cleared unless we read the interrupt status
1398 * register. If interrupts are in use, the normal interrupt
1399 * handling will clear the WOL event. Clear the WOL event
1400 * before enabling it if !phy_interrupt_is_valid()
1401 */
1402 if (!phy_interrupt_is_valid(phydev))
1403 phy_read(phydev, MII_M1011_IEVENT);
1404
1396 /* Enable the WOL interrupt */ 1405 /* Enable the WOL interrupt */
1397 err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, 1406 err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
1398 MII_88E1318S_PHY_CSIER_WOL_EIE); 1407 MII_88E1318S_PHY_CSIER_WOL_EIE);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 1483bc7b01e1..7df07337d69c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
620 lock_sock(sk); 620 lock_sock(sk);
621 621
622 error = -EINVAL; 622 error = -EINVAL;
623
624 if (sockaddr_len != sizeof(struct sockaddr_pppox))
625 goto end;
626
623 if (sp->sa_protocol != PX_PROTO_OE) 627 if (sp->sa_protocol != PX_PROTO_OE)
624 goto end; 628 goto end;
625 629
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index acbe84967834..ddb6bf85a59c 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1072,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
1072} 1072}
1073 1073
1074#ifdef CONFIG_NET_POLL_CONTROLLER 1074#ifdef CONFIG_NET_POLL_CONTROLLER
1075static int team_port_enable_netpoll(struct team *team, struct team_port *port) 1075static int __team_port_enable_netpoll(struct team_port *port)
1076{ 1076{
1077 struct netpoll *np; 1077 struct netpoll *np;
1078 int err; 1078 int err;
1079 1079
1080 if (!team->dev->npinfo)
1081 return 0;
1082
1083 np = kzalloc(sizeof(*np), GFP_KERNEL); 1080 np = kzalloc(sizeof(*np), GFP_KERNEL);
1084 if (!np) 1081 if (!np)
1085 return -ENOMEM; 1082 return -ENOMEM;
@@ -1093,6 +1090,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1093 return err; 1090 return err;
1094} 1091}
1095 1092
1093static int team_port_enable_netpoll(struct team_port *port)
1094{
1095 if (!port->team->dev->npinfo)
1096 return 0;
1097
1098 return __team_port_enable_netpoll(port);
1099}
1100
1096static void team_port_disable_netpoll(struct team_port *port) 1101static void team_port_disable_netpoll(struct team_port *port)
1097{ 1102{
1098 struct netpoll *np = port->np; 1103 struct netpoll *np = port->np;
@@ -1107,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port)
1107 kfree(np); 1112 kfree(np);
1108} 1113}
1109#else 1114#else
1110static int team_port_enable_netpoll(struct team *team, struct team_port *port) 1115static int team_port_enable_netpoll(struct team_port *port)
1111{ 1116{
1112 return 0; 1117 return 0;
1113} 1118}
@@ -1221,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1221 goto err_vids_add; 1226 goto err_vids_add;
1222 } 1227 }
1223 1228
1224 err = team_port_enable_netpoll(team, port); 1229 err = team_port_enable_netpoll(port);
1225 if (err) { 1230 if (err) {
1226 netdev_err(dev, "Failed to enable netpoll on device %s\n", 1231 netdev_err(dev, "Failed to enable netpoll on device %s\n",
1227 portname); 1232 portname);
@@ -1918,7 +1923,7 @@ static int team_netpoll_setup(struct net_device *dev,
1918 1923
1919 mutex_lock(&team->lock); 1924 mutex_lock(&team->lock);
1920 list_for_each_entry(port, &team->port_list, list) { 1925 list_for_each_entry(port, &team->port_list, list) {
1921 err = team_port_enable_netpoll(team, port); 1926 err = __team_port_enable_netpoll(port);
1922 if (err) { 1927 if (err) {
1923 __team_netpoll_cleanup(team); 1928 __team_netpoll_cleanup(team);
1924 break; 1929 break;
diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
index a6b88c7f6e3e..d2970a009eb5 100644
--- a/drivers/pci/dwc/pcie-kirin.c
+++ b/drivers/pci/dwc/pcie-kirin.c
@@ -486,7 +486,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
486 return ret; 486 return ret;
487 487
488 kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, 488 kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
489 "reset-gpio", 0); 489 "reset-gpios", 0);
490 if (kirin_pcie->gpio_id_reset < 0) 490 if (kirin_pcie->gpio_id_reset < 0)
491 return -ENODEV; 491 return -ENODEV;
492 492
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index b04d37b3c5de..9abf549631b4 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -29,6 +29,7 @@
29#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5 29#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
30#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11) 30#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
31#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12 31#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
32#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
32#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0 33#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
33#define PCIE_CORE_LINK_L0S_ENTRY BIT(0) 34#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
34#define PCIE_CORE_LINK_TRAINING BIT(5) 35#define PCIE_CORE_LINK_TRAINING BIT(5)
@@ -100,7 +101,8 @@
100#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C) 101#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
101#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4) 102#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
102#define PCIE_ISR1_FLUSH BIT(5) 103#define PCIE_ISR1_FLUSH BIT(5)
103#define PCIE_ISR1_ALL_MASK GENMASK(5, 4) 104#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val))
105#define PCIE_ISR1_ALL_MASK GENMASK(11, 4)
104#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50) 106#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
105#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) 107#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
106#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) 108#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
@@ -172,8 +174,6 @@
172#define PCIE_CONFIG_WR_TYPE0 0xa 174#define PCIE_CONFIG_WR_TYPE0 0xa
173#define PCIE_CONFIG_WR_TYPE1 0xb 175#define PCIE_CONFIG_WR_TYPE1 0xb
174 176
175/* PCI_BDF shifts 8bit, so we need extra 4bit shift */
176#define PCIE_BDF(dev) (dev << 4)
177#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20) 177#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20)
178#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15) 178#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15)
179#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12) 179#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12)
@@ -296,7 +296,8 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
296 reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE | 296 reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
297 (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) | 297 (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
298 PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE | 298 PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
299 PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT; 299 (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
300 PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
300 advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG); 301 advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
301 302
302 /* Program PCIe Control 2 to disable strict ordering */ 303 /* Program PCIe Control 2 to disable strict ordering */
@@ -437,7 +438,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
437 u32 reg; 438 u32 reg;
438 int ret; 439 int ret;
439 440
440 if (PCI_SLOT(devfn) != 0) { 441 if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
441 *val = 0xffffffff; 442 *val = 0xffffffff;
442 return PCIBIOS_DEVICE_NOT_FOUND; 443 return PCIBIOS_DEVICE_NOT_FOUND;
443 } 444 }
@@ -456,7 +457,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
456 advk_writel(pcie, reg, PIO_CTRL); 457 advk_writel(pcie, reg, PIO_CTRL);
457 458
458 /* Program the address registers */ 459 /* Program the address registers */
459 reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where); 460 reg = PCIE_CONF_ADDR(bus->number, devfn, where);
460 advk_writel(pcie, reg, PIO_ADDR_LS); 461 advk_writel(pcie, reg, PIO_ADDR_LS);
461 advk_writel(pcie, 0, PIO_ADDR_MS); 462 advk_writel(pcie, 0, PIO_ADDR_MS);
462 463
@@ -491,7 +492,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
491 int offset; 492 int offset;
492 int ret; 493 int ret;
493 494
494 if (PCI_SLOT(devfn) != 0) 495 if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
495 return PCIBIOS_DEVICE_NOT_FOUND; 496 return PCIBIOS_DEVICE_NOT_FOUND;
496 497
497 if (where % size) 498 if (where % size)
@@ -609,9 +610,9 @@ static void advk_pcie_irq_mask(struct irq_data *d)
609 irq_hw_number_t hwirq = irqd_to_hwirq(d); 610 irq_hw_number_t hwirq = irqd_to_hwirq(d);
610 u32 mask; 611 u32 mask;
611 612
612 mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); 613 mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
613 mask |= PCIE_ISR0_INTX_ASSERT(hwirq); 614 mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
614 advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); 615 advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
615} 616}
616 617
617static void advk_pcie_irq_unmask(struct irq_data *d) 618static void advk_pcie_irq_unmask(struct irq_data *d)
@@ -620,9 +621,9 @@ static void advk_pcie_irq_unmask(struct irq_data *d)
620 irq_hw_number_t hwirq = irqd_to_hwirq(d); 621 irq_hw_number_t hwirq = irqd_to_hwirq(d);
621 u32 mask; 622 u32 mask;
622 623
623 mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); 624 mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
624 mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq); 625 mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
625 advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); 626 advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
626} 627}
627 628
628static int advk_pcie_irq_map(struct irq_domain *h, 629static int advk_pcie_irq_map(struct irq_domain *h,
@@ -765,29 +766,35 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
765 766
766static void advk_pcie_handle_int(struct advk_pcie *pcie) 767static void advk_pcie_handle_int(struct advk_pcie *pcie)
767{ 768{
768 u32 val, mask, status; 769 u32 isr0_val, isr0_mask, isr0_status;
770 u32 isr1_val, isr1_mask, isr1_status;
769 int i, virq; 771 int i, virq;
770 772
771 val = advk_readl(pcie, PCIE_ISR0_REG); 773 isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
772 mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); 774 isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
773 status = val & ((~mask) & PCIE_ISR0_ALL_MASK); 775 isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);
776
777 isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
778 isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
779 isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
774 780
775 if (!status) { 781 if (!isr0_status && !isr1_status) {
776 advk_writel(pcie, val, PCIE_ISR0_REG); 782 advk_writel(pcie, isr0_val, PCIE_ISR0_REG);
783 advk_writel(pcie, isr1_val, PCIE_ISR1_REG);
777 return; 784 return;
778 } 785 }
779 786
780 /* Process MSI interrupts */ 787 /* Process MSI interrupts */
781 if (status & PCIE_ISR0_MSI_INT_PENDING) 788 if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
782 advk_pcie_handle_msi(pcie); 789 advk_pcie_handle_msi(pcie);
783 790
784 /* Process legacy interrupts */ 791 /* Process legacy interrupts */
785 for (i = 0; i < PCI_NUM_INTX; i++) { 792 for (i = 0; i < PCI_NUM_INTX; i++) {
786 if (!(status & PCIE_ISR0_INTX_ASSERT(i))) 793 if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
787 continue; 794 continue;
788 795
789 advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i), 796 advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
790 PCIE_ISR0_REG); 797 PCIE_ISR1_REG);
791 798
792 virq = irq_find_mapping(pcie->irq_domain, i); 799 virq = irq_find_mapping(pcie->irq_domain, i);
793 generic_handle_irq(virq); 800 generic_handle_irq(virq);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 6ace47099fc5..b9a131137e64 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -958,10 +958,11 @@ static int pci_pm_freeze(struct device *dev)
958 * devices should not be touched during freeze/thaw transitions, 958 * devices should not be touched during freeze/thaw transitions,
959 * however. 959 * however.
960 */ 960 */
961 if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) 961 if (!dev_pm_smart_suspend_and_suspended(dev)) {
962 pm_runtime_resume(dev); 962 pm_runtime_resume(dev);
963 pci_dev->state_saved = false;
964 }
963 965
964 pci_dev->state_saved = false;
965 if (pm->freeze) { 966 if (pm->freeze) {
966 int error; 967 int error;
967 968
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e597655a5643..a04197ce767d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5273,11 +5273,11 @@ void pcie_print_link_status(struct pci_dev *dev)
5273 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width); 5273 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5274 5274
5275 if (bw_avail >= bw_cap) 5275 if (bw_avail >= bw_cap)
5276 pci_info(dev, "%u.%03u Gb/s available bandwidth (%s x%d link)\n", 5276 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5277 bw_cap / 1000, bw_cap % 1000, 5277 bw_cap / 1000, bw_cap % 1000,
5278 PCIE_SPEED2STR(speed_cap), width_cap); 5278 PCIE_SPEED2STR(speed_cap), width_cap);
5279 else 5279 else
5280 pci_info(dev, "%u.%03u Gb/s available bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", 5280 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5281 bw_avail / 1000, bw_avail % 1000, 5281 bw_avail / 1000, bw_avail % 1000,
5282 PCIE_SPEED2STR(speed), width, 5282 PCIE_SPEED2STR(speed), width,
5283 limiting_dev ? pci_name(limiting_dev) : "<unknown>", 5283 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 62f5f04d8f61..5e963fe0e38d 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
592int dasd_alias_add_device(struct dasd_device *device) 592int dasd_alias_add_device(struct dasd_device *device)
593{ 593{
594 struct dasd_eckd_private *private = device->private; 594 struct dasd_eckd_private *private = device->private;
595 struct alias_lcu *lcu; 595 __u8 uaddr = private->uid.real_unit_addr;
596 struct alias_lcu *lcu = private->lcu;
596 unsigned long flags; 597 unsigned long flags;
597 int rc; 598 int rc;
598 599
599 lcu = private->lcu;
600 rc = 0; 600 rc = 0;
601 spin_lock_irqsave(&lcu->lock, flags); 601 spin_lock_irqsave(&lcu->lock, flags);
602 /*
603 * Check if device and lcu type differ. If so, the uac data may be
604 * outdated and needs to be updated.
605 */
606 if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
607 lcu->flags |= UPDATE_PENDING;
608 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
609 "uid type mismatch - trigger rescan");
610 }
602 if (!(lcu->flags & UPDATE_PENDING)) { 611 if (!(lcu->flags & UPDATE_PENDING)) {
603 rc = _add_device_to_lcu(lcu, device, device); 612 rc = _add_device_to_lcu(lcu, device, device);
604 if (rc) 613 if (rc)
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6652a49a49b1..9029804dcd22 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -452,6 +452,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
452 452
453static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) 453static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
454{ 454{
455 struct channel_path *chp;
455 struct chp_link link; 456 struct chp_link link;
456 struct chp_id chpid; 457 struct chp_id chpid;
457 int status; 458 int status;
@@ -464,10 +465,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
464 chpid.id = sei_area->rsid; 465 chpid.id = sei_area->rsid;
465 /* allocate a new channel path structure, if needed */ 466 /* allocate a new channel path structure, if needed */
466 status = chp_get_status(chpid); 467 status = chp_get_status(chpid);
467 if (status < 0) 468 if (!status)
468 chp_new(chpid);
469 else if (!status)
470 return; 469 return;
470
471 if (status < 0) {
472 chp_new(chpid);
473 } else {
474 chp = chpid_to_chp(chpid);
475 mutex_lock(&chp->lock);
476 chp_update_desc(chp);
477 mutex_unlock(&chp->lock);
478 }
471 memset(&link, 0, sizeof(struct chp_link)); 479 memset(&link, 0, sizeof(struct chp_link));
472 link.chpid = chpid; 480 link.chpid = chpid;
473 if ((sei_area->vf & 0xc0) != 0) { 481 if ((sei_area->vf & 0xc0) != 0) {
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index ff6963ad6e39..3c800642134e 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -20,12 +20,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
20 int ccode; 20 int ccode;
21 __u8 lpm; 21 __u8 lpm;
22 unsigned long flags; 22 unsigned long flags;
23 int ret;
23 24
24 sch = private->sch; 25 sch = private->sch;
25 26
26 spin_lock_irqsave(sch->lock, flags); 27 spin_lock_irqsave(sch->lock, flags);
27 private->state = VFIO_CCW_STATE_BUSY; 28 private->state = VFIO_CCW_STATE_BUSY;
28 spin_unlock_irqrestore(sch->lock, flags);
29 29
30 orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm); 30 orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
31 31
@@ -38,10 +38,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
38 * Initialize device status information 38 * Initialize device status information
39 */ 39 */
40 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; 40 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
41 return 0; 41 ret = 0;
42 break;
42 case 1: /* Status pending */ 43 case 1: /* Status pending */
43 case 2: /* Busy */ 44 case 2: /* Busy */
44 return -EBUSY; 45 ret = -EBUSY;
46 break;
45 case 3: /* Device/path not operational */ 47 case 3: /* Device/path not operational */
46 { 48 {
47 lpm = orb->cmd.lpm; 49 lpm = orb->cmd.lpm;
@@ -51,13 +53,16 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
51 sch->lpm = 0; 53 sch->lpm = 0;
52 54
53 if (cio_update_schib(sch)) 55 if (cio_update_schib(sch))
54 return -ENODEV; 56 ret = -ENODEV;
55 57 else
56 return sch->lpm ? -EACCES : -ENODEV; 58 ret = sch->lpm ? -EACCES : -ENODEV;
59 break;
57 } 60 }
58 default: 61 default:
59 return ccode; 62 ret = ccode;
60 } 63 }
64 spin_unlock_irqrestore(sch->lock, flags);
65 return ret;
61} 66}
62 67
63static void fsm_notoper(struct vfio_ccw_private *private, 68static void fsm_notoper(struct vfio_ccw_private *private,
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 4326715dc13e..78b98b3e7efa 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -557,7 +557,6 @@ enum qeth_prot_versions {
557enum qeth_cmd_buffer_state { 557enum qeth_cmd_buffer_state {
558 BUF_STATE_FREE, 558 BUF_STATE_FREE,
559 BUF_STATE_LOCKED, 559 BUF_STATE_LOCKED,
560 BUF_STATE_PROCESSED,
561}; 560};
562 561
563enum qeth_cq { 562enum qeth_cq {
@@ -601,7 +600,6 @@ struct qeth_channel {
601 struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO]; 600 struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
602 atomic_t irq_pending; 601 atomic_t irq_pending;
603 int io_buf_no; 602 int io_buf_no;
604 int buf_no;
605}; 603};
606 604
607/** 605/**
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 04fefa5bb08d..dffd820731f2 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -706,7 +706,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
706 qeth_put_reply(reply); 706 qeth_put_reply(reply);
707 } 707 }
708 spin_unlock_irqrestore(&card->lock, flags); 708 spin_unlock_irqrestore(&card->lock, flags);
709 atomic_set(&card->write.irq_pending, 0);
710} 709}
711EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); 710EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
712 711
@@ -818,7 +817,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel)
818 817
819 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) 818 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
820 qeth_release_buffer(channel, &channel->iob[cnt]); 819 qeth_release_buffer(channel, &channel->iob[cnt]);
821 channel->buf_no = 0;
822 channel->io_buf_no = 0; 820 channel->io_buf_no = 0;
823} 821}
824EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); 822EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
@@ -924,7 +922,6 @@ static int qeth_setup_channel(struct qeth_channel *channel)
924 kfree(channel->iob[cnt].data); 922 kfree(channel->iob[cnt].data);
925 return -ENOMEM; 923 return -ENOMEM;
926 } 924 }
927 channel->buf_no = 0;
928 channel->io_buf_no = 0; 925 channel->io_buf_no = 0;
929 atomic_set(&channel->irq_pending, 0); 926 atomic_set(&channel->irq_pending, 0);
930 spin_lock_init(&channel->iob_lock); 927 spin_lock_init(&channel->iob_lock);
@@ -1100,16 +1097,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1100{ 1097{
1101 int rc; 1098 int rc;
1102 int cstat, dstat; 1099 int cstat, dstat;
1103 struct qeth_cmd_buffer *buffer; 1100 struct qeth_cmd_buffer *iob = NULL;
1104 struct qeth_channel *channel; 1101 struct qeth_channel *channel;
1105 struct qeth_card *card; 1102 struct qeth_card *card;
1106 struct qeth_cmd_buffer *iob;
1107 __u8 index;
1108
1109 if (__qeth_check_irb_error(cdev, intparm, irb))
1110 return;
1111 cstat = irb->scsw.cmd.cstat;
1112 dstat = irb->scsw.cmd.dstat;
1113 1103
1114 card = CARD_FROM_CDEV(cdev); 1104 card = CARD_FROM_CDEV(cdev);
1115 if (!card) 1105 if (!card)
@@ -1127,6 +1117,19 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1127 channel = &card->data; 1117 channel = &card->data;
1128 QETH_CARD_TEXT(card, 5, "data"); 1118 QETH_CARD_TEXT(card, 5, "data");
1129 } 1119 }
1120
1121 if (qeth_intparm_is_iob(intparm))
1122 iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
1123
1124 if (__qeth_check_irb_error(cdev, intparm, irb)) {
1125 /* IO was terminated, free its resources. */
1126 if (iob)
1127 qeth_release_buffer(iob->channel, iob);
1128 atomic_set(&channel->irq_pending, 0);
1129 wake_up(&card->wait_q);
1130 return;
1131 }
1132
1130 atomic_set(&channel->irq_pending, 0); 1133 atomic_set(&channel->irq_pending, 0);
1131 1134
1132 if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC)) 1135 if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
@@ -1150,6 +1153,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1150 /* we don't have to handle this further */ 1153 /* we don't have to handle this further */
1151 intparm = 0; 1154 intparm = 0;
1152 } 1155 }
1156
1157 cstat = irb->scsw.cmd.cstat;
1158 dstat = irb->scsw.cmd.dstat;
1159
1153 if ((dstat & DEV_STAT_UNIT_EXCEP) || 1160 if ((dstat & DEV_STAT_UNIT_EXCEP) ||
1154 (dstat & DEV_STAT_UNIT_CHECK) || 1161 (dstat & DEV_STAT_UNIT_CHECK) ||
1155 (cstat)) { 1162 (cstat)) {
@@ -1182,25 +1189,15 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1182 channel->state = CH_STATE_RCD_DONE; 1189 channel->state = CH_STATE_RCD_DONE;
1183 goto out; 1190 goto out;
1184 } 1191 }
1185 if (intparm) {
1186 buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
1187 buffer->state = BUF_STATE_PROCESSED;
1188 }
1189 if (channel == &card->data) 1192 if (channel == &card->data)
1190 return; 1193 return;
1191 if (channel == &card->read && 1194 if (channel == &card->read &&
1192 channel->state == CH_STATE_UP) 1195 channel->state == CH_STATE_UP)
1193 __qeth_issue_next_read(card); 1196 __qeth_issue_next_read(card);
1194 1197
1195 iob = channel->iob; 1198 if (iob && iob->callback)
1196 index = channel->buf_no; 1199 iob->callback(iob->channel, iob);
1197 while (iob[index].state == BUF_STATE_PROCESSED) {
1198 if (iob[index].callback != NULL)
1199 iob[index].callback(channel, iob + index);
1200 1200
1201 index = (index + 1) % QETH_CMD_BUFFER_NO;
1202 }
1203 channel->buf_no = index;
1204out: 1201out:
1205 wake_up(&card->wait_q); 1202 wake_up(&card->wait_q);
1206 return; 1203 return;
@@ -1870,8 +1867,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
1870 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); 1867 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1871 QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); 1868 QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
1872 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 1869 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1873 rc = ccw_device_start(channel->ccwdev, 1870 rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
1874 &channel->ccw, (addr_t) iob, 0, 0); 1871 (addr_t) iob, 0, 0, QETH_TIMEOUT);
1875 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); 1872 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1876 1873
1877 if (rc) { 1874 if (rc) {
@@ -1888,7 +1885,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
1888 if (channel->state != CH_STATE_UP) { 1885 if (channel->state != CH_STATE_UP) {
1889 rc = -ETIME; 1886 rc = -ETIME;
1890 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 1887 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
1891 qeth_clear_cmd_buffers(channel);
1892 } else 1888 } else
1893 rc = 0; 1889 rc = 0;
1894 return rc; 1890 return rc;
@@ -1942,8 +1938,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
1942 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); 1938 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1943 QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); 1939 QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
1944 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 1940 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1945 rc = ccw_device_start(channel->ccwdev, 1941 rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
1946 &channel->ccw, (addr_t) iob, 0, 0); 1942 (addr_t) iob, 0, 0, QETH_TIMEOUT);
1947 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); 1943 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1948 1944
1949 if (rc) { 1945 if (rc) {
@@ -1964,7 +1960,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
1964 QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n", 1960 QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
1965 dev_name(&channel->ccwdev->dev)); 1961 dev_name(&channel->ccwdev->dev));
1966 QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); 1962 QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
1967 qeth_clear_cmd_buffers(channel);
1968 return -ETIME; 1963 return -ETIME;
1969 } 1964 }
1970 return qeth_idx_activate_get_answer(channel, idx_reply_cb); 1965 return qeth_idx_activate_get_answer(channel, idx_reply_cb);
@@ -2166,8 +2161,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2166 2161
2167 QETH_CARD_TEXT(card, 6, "noirqpnd"); 2162 QETH_CARD_TEXT(card, 6, "noirqpnd");
2168 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); 2163 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
2169 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, 2164 rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
2170 (addr_t) iob, 0, 0); 2165 (addr_t) iob, 0, 0, event_timeout);
2171 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); 2166 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
2172 if (rc) { 2167 if (rc) {
2173 QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " 2168 QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
@@ -2199,8 +2194,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2199 } 2194 }
2200 } 2195 }
2201 2196
2202 if (reply->rc == -EIO)
2203 goto error;
2204 rc = reply->rc; 2197 rc = reply->rc;
2205 qeth_put_reply(reply); 2198 qeth_put_reply(reply);
2206 return rc; 2199 return rc;
@@ -2211,10 +2204,6 @@ time_err:
2211 list_del_init(&reply->list); 2204 list_del_init(&reply->list);
2212 spin_unlock_irqrestore(&reply->card->lock, flags); 2205 spin_unlock_irqrestore(&reply->card->lock, flags);
2213 atomic_inc(&reply->received); 2206 atomic_inc(&reply->received);
2214error:
2215 atomic_set(&card->write.irq_pending, 0);
2216 qeth_release_buffer(iob->channel, iob);
2217 card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
2218 rc = reply->rc; 2207 rc = reply->rc;
2219 qeth_put_reply(reply); 2208 qeth_put_reply(reply);
2220 return rc; 2209 return rc;
@@ -3033,28 +3022,23 @@ static int qeth_send_startlan(struct qeth_card *card)
3033 return rc; 3022 return rc;
3034} 3023}
3035 3024
3036static int qeth_default_setadapterparms_cb(struct qeth_card *card, 3025static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
3037 struct qeth_reply *reply, unsigned long data)
3038{ 3026{
3039 struct qeth_ipa_cmd *cmd; 3027 if (!cmd->hdr.return_code)
3040
3041 QETH_CARD_TEXT(card, 4, "defadpcb");
3042
3043 cmd = (struct qeth_ipa_cmd *) data;
3044 if (cmd->hdr.return_code == 0)
3045 cmd->hdr.return_code = 3028 cmd->hdr.return_code =
3046 cmd->data.setadapterparms.hdr.return_code; 3029 cmd->data.setadapterparms.hdr.return_code;
3047 return 0; 3030 return cmd->hdr.return_code;
3048} 3031}
3049 3032
3050static int qeth_query_setadapterparms_cb(struct qeth_card *card, 3033static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3051 struct qeth_reply *reply, unsigned long data) 3034 struct qeth_reply *reply, unsigned long data)
3052{ 3035{
3053 struct qeth_ipa_cmd *cmd; 3036 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3054 3037
3055 QETH_CARD_TEXT(card, 3, "quyadpcb"); 3038 QETH_CARD_TEXT(card, 3, "quyadpcb");
3039 if (qeth_setadpparms_inspect_rc(cmd))
3040 return 0;
3056 3041
3057 cmd = (struct qeth_ipa_cmd *) data;
3058 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { 3042 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
3059 card->info.link_type = 3043 card->info.link_type =
3060 cmd->data.setadapterparms.data.query_cmds_supp.lan_type; 3044 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
@@ -3062,7 +3046,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3062 } 3046 }
3063 card->options.adp.supported_funcs = 3047 card->options.adp.supported_funcs =
3064 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; 3048 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
3065 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); 3049 return 0;
3066} 3050}
3067 3051
3068static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, 3052static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
@@ -3154,22 +3138,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists);
3154static int qeth_query_switch_attributes_cb(struct qeth_card *card, 3138static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3155 struct qeth_reply *reply, unsigned long data) 3139 struct qeth_reply *reply, unsigned long data)
3156{ 3140{
3157 struct qeth_ipa_cmd *cmd; 3141 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3158 struct qeth_switch_info *sw_info;
3159 struct qeth_query_switch_attributes *attrs; 3142 struct qeth_query_switch_attributes *attrs;
3143 struct qeth_switch_info *sw_info;
3160 3144
3161 QETH_CARD_TEXT(card, 2, "qswiatcb"); 3145 QETH_CARD_TEXT(card, 2, "qswiatcb");
3162 cmd = (struct qeth_ipa_cmd *) data; 3146 if (qeth_setadpparms_inspect_rc(cmd))
3163 sw_info = (struct qeth_switch_info *)reply->param; 3147 return 0;
3164 if (cmd->data.setadapterparms.hdr.return_code == 0) {
3165 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3166 sw_info->capabilities = attrs->capabilities;
3167 sw_info->settings = attrs->settings;
3168 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3169 sw_info->settings);
3170 }
3171 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3172 3148
3149 sw_info = (struct qeth_switch_info *)reply->param;
3150 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3151 sw_info->capabilities = attrs->capabilities;
3152 sw_info->settings = attrs->settings;
3153 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3154 sw_info->settings);
3173 return 0; 3155 return 0;
3174} 3156}
3175 3157
@@ -4207,16 +4189,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet);
4207static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, 4189static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4208 struct qeth_reply *reply, unsigned long data) 4190 struct qeth_reply *reply, unsigned long data)
4209{ 4191{
4210 struct qeth_ipa_cmd *cmd; 4192 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4211 struct qeth_ipacmd_setadpparms *setparms; 4193 struct qeth_ipacmd_setadpparms *setparms;
4212 4194
4213 QETH_CARD_TEXT(card, 4, "prmadpcb"); 4195 QETH_CARD_TEXT(card, 4, "prmadpcb");
4214 4196
4215 cmd = (struct qeth_ipa_cmd *) data;
4216 setparms = &(cmd->data.setadapterparms); 4197 setparms = &(cmd->data.setadapterparms);
4217 4198 if (qeth_setadpparms_inspect_rc(cmd)) {
4218 qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
4219 if (cmd->hdr.return_code) {
4220 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); 4199 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4221 setparms->data.mode = SET_PROMISC_MODE_OFF; 4200 setparms->data.mode = SET_PROMISC_MODE_OFF;
4222 } 4201 }
@@ -4286,18 +4265,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats);
4286static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, 4265static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4287 struct qeth_reply *reply, unsigned long data) 4266 struct qeth_reply *reply, unsigned long data)
4288{ 4267{
4289 struct qeth_ipa_cmd *cmd; 4268 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4290 4269
4291 QETH_CARD_TEXT(card, 4, "chgmaccb"); 4270 QETH_CARD_TEXT(card, 4, "chgmaccb");
4271 if (qeth_setadpparms_inspect_rc(cmd))
4272 return 0;
4292 4273
4293 cmd = (struct qeth_ipa_cmd *) data;
4294 if (!card->options.layer2 || 4274 if (!card->options.layer2 ||
4295 !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { 4275 !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
4296 ether_addr_copy(card->dev->dev_addr, 4276 ether_addr_copy(card->dev->dev_addr,
4297 cmd->data.setadapterparms.data.change_addr.addr); 4277 cmd->data.setadapterparms.data.change_addr.addr);
4298 card->info.mac_bits |= QETH_LAYER2_MAC_READ; 4278 card->info.mac_bits |= QETH_LAYER2_MAC_READ;
4299 } 4279 }
4300 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
4301 return 0; 4280 return 0;
4302} 4281}
4303 4282
@@ -4328,13 +4307,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4328static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, 4307static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4329 struct qeth_reply *reply, unsigned long data) 4308 struct qeth_reply *reply, unsigned long data)
4330{ 4309{
4331 struct qeth_ipa_cmd *cmd; 4310 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4332 struct qeth_set_access_ctrl *access_ctrl_req; 4311 struct qeth_set_access_ctrl *access_ctrl_req;
4333 int fallback = *(int *)reply->param; 4312 int fallback = *(int *)reply->param;
4334 4313
4335 QETH_CARD_TEXT(card, 4, "setaccb"); 4314 QETH_CARD_TEXT(card, 4, "setaccb");
4315 if (cmd->hdr.return_code)
4316 return 0;
4317 qeth_setadpparms_inspect_rc(cmd);
4336 4318
4337 cmd = (struct qeth_ipa_cmd *) data;
4338 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4319 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4339 QETH_DBF_TEXT_(SETUP, 2, "setaccb"); 4320 QETH_DBF_TEXT_(SETUP, 2, "setaccb");
4340 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); 4321 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
@@ -4407,7 +4388,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4407 card->options.isolation = card->options.prev_isolation; 4388 card->options.isolation = card->options.prev_isolation;
4408 break; 4389 break;
4409 } 4390 }
4410 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
4411 return 0; 4391 return 0;
4412} 4392}
4413 4393
@@ -4695,14 +4675,15 @@ out:
4695static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, 4675static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4696 struct qeth_reply *reply, unsigned long data) 4676 struct qeth_reply *reply, unsigned long data)
4697{ 4677{
4698 struct qeth_ipa_cmd *cmd; 4678 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4699 struct qeth_qoat_priv *priv; 4679 struct qeth_qoat_priv *priv;
4700 char *resdata; 4680 char *resdata;
4701 int resdatalen; 4681 int resdatalen;
4702 4682
4703 QETH_CARD_TEXT(card, 3, "qoatcb"); 4683 QETH_CARD_TEXT(card, 3, "qoatcb");
4684 if (qeth_setadpparms_inspect_rc(cmd))
4685 return 0;
4704 4686
4705 cmd = (struct qeth_ipa_cmd *)data;
4706 priv = (struct qeth_qoat_priv *)reply->param; 4687 priv = (struct qeth_qoat_priv *)reply->param;
4707 resdatalen = cmd->data.setadapterparms.hdr.cmdlength; 4688 resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4708 resdata = (char *)data + 28; 4689 resdata = (char *)data + 28;
@@ -4796,21 +4777,18 @@ out:
4796static int qeth_query_card_info_cb(struct qeth_card *card, 4777static int qeth_query_card_info_cb(struct qeth_card *card,
4797 struct qeth_reply *reply, unsigned long data) 4778 struct qeth_reply *reply, unsigned long data)
4798{ 4779{
4799 struct qeth_ipa_cmd *cmd; 4780 struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4781 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4800 struct qeth_query_card_info *card_info; 4782 struct qeth_query_card_info *card_info;
4801 struct carrier_info *carrier_info;
4802 4783
4803 QETH_CARD_TEXT(card, 2, "qcrdincb"); 4784 QETH_CARD_TEXT(card, 2, "qcrdincb");
4804 carrier_info = (struct carrier_info *)reply->param; 4785 if (qeth_setadpparms_inspect_rc(cmd))
4805 cmd = (struct qeth_ipa_cmd *)data; 4786 return 0;
4806 card_info = &cmd->data.setadapterparms.data.card_info;
4807 if (cmd->data.setadapterparms.hdr.return_code == 0) {
4808 carrier_info->card_type = card_info->card_type;
4809 carrier_info->port_mode = card_info->port_mode;
4810 carrier_info->port_speed = card_info->port_speed;
4811 }
4812 4787
4813 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); 4788 card_info = &cmd->data.setadapterparms.data.card_info;
4789 carrier_info->card_type = card_info->card_type;
4790 carrier_info->port_mode = card_info->port_mode;
4791 carrier_info->port_speed = card_info->port_speed;
4814 return 0; 4792 return 0;
4815} 4793}
4816 4794
@@ -4857,7 +4835,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
4857 goto out; 4835 goto out;
4858 } 4836 }
4859 4837
4860 ccw_device_get_id(CARD_DDEV(card), &id); 4838 ccw_device_get_id(CARD_RDEV(card), &id);
4861 request->resp_buf_len = sizeof(*response); 4839 request->resp_buf_len = sizeof(*response);
4862 request->resp_version = DIAG26C_VERSION2; 4840 request->resp_version = DIAG26C_VERSION2;
4863 request->op_code = DIAG26C_GET_MAC; 4841 request->op_code = DIAG26C_GET_MAC;
@@ -6563,10 +6541,14 @@ static int __init qeth_core_init(void)
6563 mutex_init(&qeth_mod_mutex); 6541 mutex_init(&qeth_mod_mutex);
6564 6542
6565 qeth_wq = create_singlethread_workqueue("qeth_wq"); 6543 qeth_wq = create_singlethread_workqueue("qeth_wq");
6544 if (!qeth_wq) {
6545 rc = -ENOMEM;
6546 goto out_err;
6547 }
6566 6548
6567 rc = qeth_register_dbf_views(); 6549 rc = qeth_register_dbf_views();
6568 if (rc) 6550 if (rc)
6569 goto out_err; 6551 goto dbf_err;
6570 qeth_core_root_dev = root_device_register("qeth"); 6552 qeth_core_root_dev = root_device_register("qeth");
6571 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); 6553 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
6572 if (rc) 6554 if (rc)
@@ -6603,6 +6585,8 @@ slab_err:
6603 root_device_unregister(qeth_core_root_dev); 6585 root_device_unregister(qeth_core_root_dev);
6604register_err: 6586register_err:
6605 qeth_unregister_dbf_views(); 6587 qeth_unregister_dbf_views();
6588dbf_err:
6589 destroy_workqueue(qeth_wq);
6606out_err: 6590out_err:
6607 pr_err("Initializing the qeth device driver failed\n"); 6591 pr_err("Initializing the qeth device driver failed\n");
6608 return rc; 6592 return rc;
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 619f897b4bb0..f4d1ec0b8f5a 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -35,6 +35,18 @@ extern unsigned char IPA_PDU_HEADER[];
35#define QETH_HALT_CHANNEL_PARM -11 35#define QETH_HALT_CHANNEL_PARM -11
36#define QETH_RCD_PARM -12 36#define QETH_RCD_PARM -12
37 37
38static inline bool qeth_intparm_is_iob(unsigned long intparm)
39{
40 switch (intparm) {
41 case QETH_CLEAR_CHANNEL_PARM:
42 case QETH_HALT_CHANNEL_PARM:
43 case QETH_RCD_PARM:
44 case 0:
45 return false;
46 }
47 return true;
48}
49
38/*****************************************************************************/ 50/*****************************************************************************/
39/* IP Assist related definitions */ 51/* IP Assist related definitions */
40/*****************************************************************************/ 52/*****************************************************************************/
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 2ad6f12f3d49..b8079f2a65b3 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -121,13 +121,10 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
121 QETH_CARD_TEXT(card, 2, "L2Setmac"); 121 QETH_CARD_TEXT(card, 2, "L2Setmac");
122 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); 122 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
123 if (rc == 0) { 123 if (rc == 0) {
124 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
125 ether_addr_copy(card->dev->dev_addr, mac);
126 dev_info(&card->gdev->dev, 124 dev_info(&card->gdev->dev,
127 "MAC address %pM successfully registered on device %s\n", 125 "MAC address %pM successfully registered on device %s\n",
128 card->dev->dev_addr, card->dev->name); 126 mac, card->dev->name);
129 } else { 127 } else {
130 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
131 switch (rc) { 128 switch (rc) {
132 case -EEXIST: 129 case -EEXIST:
133 dev_warn(&card->gdev->dev, 130 dev_warn(&card->gdev->dev,
@@ -142,19 +139,6 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
142 return rc; 139 return rc;
143} 140}
144 141
145static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
146{
147 int rc;
148
149 QETH_CARD_TEXT(card, 2, "L2Delmac");
150 if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
151 return 0;
152 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC);
153 if (rc == 0)
154 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
155 return rc;
156}
157
158static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) 142static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
159{ 143{
160 enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? 144 enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
@@ -519,6 +503,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
519{ 503{
520 struct sockaddr *addr = p; 504 struct sockaddr *addr = p;
521 struct qeth_card *card = dev->ml_priv; 505 struct qeth_card *card = dev->ml_priv;
506 u8 old_addr[ETH_ALEN];
522 int rc = 0; 507 int rc = 0;
523 508
524 QETH_CARD_TEXT(card, 3, "setmac"); 509 QETH_CARD_TEXT(card, 3, "setmac");
@@ -530,14 +515,35 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
530 return -EOPNOTSUPP; 515 return -EOPNOTSUPP;
531 } 516 }
532 QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN); 517 QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
518 if (!is_valid_ether_addr(addr->sa_data))
519 return -EADDRNOTAVAIL;
520
533 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { 521 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
534 QETH_CARD_TEXT(card, 3, "setmcREC"); 522 QETH_CARD_TEXT(card, 3, "setmcREC");
535 return -ERESTARTSYS; 523 return -ERESTARTSYS;
536 } 524 }
537 rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); 525
538 if (!rc || (rc == -ENOENT)) 526 if (!qeth_card_hw_is_reachable(card)) {
539 rc = qeth_l2_send_setmac(card, addr->sa_data); 527 ether_addr_copy(dev->dev_addr, addr->sa_data);
540 return rc ? -EINVAL : 0; 528 return 0;
529 }
530
531 /* don't register the same address twice */
532 if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
533 (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
534 return 0;
535
536 /* add the new address, switch over, drop the old */
537 rc = qeth_l2_send_setmac(card, addr->sa_data);
538 if (rc)
539 return rc;
540 ether_addr_copy(old_addr, dev->dev_addr);
541 ether_addr_copy(dev->dev_addr, addr->sa_data);
542
543 if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
544 qeth_l2_remove_mac(card, old_addr);
545 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
546 return 0;
541} 547}
542 548
543static void qeth_promisc_to_bridge(struct qeth_card *card) 549static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -1067,8 +1073,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1067 goto out_remove; 1073 goto out_remove;
1068 } 1074 }
1069 1075
1070 if (card->info.type != QETH_CARD_TYPE_OSN) 1076 if (card->info.type != QETH_CARD_TYPE_OSN &&
1071 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); 1077 !qeth_l2_send_setmac(card, card->dev->dev_addr))
1078 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
1072 1079
1073 if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { 1080 if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
1074 if (card->info.hwtrap && 1081 if (card->info.hwtrap &&
@@ -1338,8 +1345,8 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
1338 qeth_prepare_control_data(card, len, iob); 1345 qeth_prepare_control_data(card, len, iob);
1339 QETH_CARD_TEXT(card, 6, "osnoirqp"); 1346 QETH_CARD_TEXT(card, 6, "osnoirqp");
1340 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); 1347 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1341 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, 1348 rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
1342 (addr_t) iob, 0, 0); 1349 (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
1343 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); 1350 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1344 if (rc) { 1351 if (rc) {
1345 QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " 1352 QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index abddde11982b..98597b59c12a 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -296,7 +296,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
296 "Number of Abort FW Timeouts: %lld\n" 296 "Number of Abort FW Timeouts: %lld\n"
297 "Number of Abort IO NOT Found: %lld\n" 297 "Number of Abort IO NOT Found: %lld\n"
298 298
299 "Abord issued times: \n" 299 "Abort issued times: \n"
300 " < 6 sec : %lld\n" 300 " < 6 sec : %lld\n"
301 " 6 sec - 20 sec : %lld\n" 301 " 6 sec - 20 sec : %lld\n"
302 " 20 sec - 30 sec : %lld\n" 302 " 20 sec - 30 sec : %lld\n"
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index ce97cde3b41c..f4d988dd1e9d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1124,12 +1124,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
1124 goto fail_fw_init; 1124 goto fail_fw_init;
1125 } 1125 }
1126 1126
1127 ret = 0; 1127 return 0;
1128 1128
1129fail_fw_init: 1129fail_fw_init:
1130 dev_err(&instance->pdev->dev, 1130 dev_err(&instance->pdev->dev,
1131 "Init cmd return status %s for SCSI host %d\n", 1131 "Init cmd return status FAILED for SCSI host %d\n",
1132 ret ? "FAILED" : "SUCCESS", instance->host->host_no); 1132 instance->host->host_no);
1133 1133
1134 return ret; 1134 return ret;
1135} 1135}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 9ef5e3b810f6..656c98e116a9 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -234,11 +234,13 @@ static const char *sdebug_version_date = "20180128";
234#define F_INV_OP 0x200 234#define F_INV_OP 0x200
235#define F_FAKE_RW 0x400 235#define F_FAKE_RW 0x400
236#define F_M_ACCESS 0x800 /* media access */ 236#define F_M_ACCESS 0x800 /* media access */
237#define F_LONG_DELAY 0x1000 237#define F_SSU_DELAY 0x1000
238#define F_SYNC_DELAY 0x2000
238 239
239#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR) 240#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
240#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW) 241#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
241#define FF_SA (F_SA_HIGH | F_SA_LOW) 242#define FF_SA (F_SA_HIGH | F_SA_LOW)
243#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
242 244
243#define SDEBUG_MAX_PARTS 4 245#define SDEBUG_MAX_PARTS 4
244 246
@@ -510,7 +512,7 @@ static const struct opcode_info_t release_iarr[] = {
510}; 512};
511 513
512static const struct opcode_info_t sync_cache_iarr[] = { 514static const struct opcode_info_t sync_cache_iarr[] = {
513 {0, 0x91, 0, F_LONG_DELAY | F_M_ACCESS, resp_sync_cache, NULL, 515 {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
514 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 516 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
515 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */ 517 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
516}; 518};
@@ -553,7 +555,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
553 resp_write_dt0, write_iarr, /* WRITE(16) */ 555 resp_write_dt0, write_iarr, /* WRITE(16) */
554 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 556 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
555 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, 557 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
556 {0, 0x1b, 0, F_LONG_DELAY, resp_start_stop, NULL,/* START STOP UNIT */ 558 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
557 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 559 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
558 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN, 560 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
559 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */ 561 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
@@ -606,7 +608,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
606 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */ 608 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
607 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 609 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
608 0, 0, 0, 0, 0} }, 610 0, 0, 0, 0, 0} },
609 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_LONG_DELAY | F_M_ACCESS, 611 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
610 resp_sync_cache, sync_cache_iarr, 612 resp_sync_cache, sync_cache_iarr,
611 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 613 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
612 0, 0, 0, 0} }, /* SYNC_CACHE (10) */ 614 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
@@ -667,6 +669,7 @@ static bool sdebug_strict = DEF_STRICT;
667static bool sdebug_any_injecting_opt; 669static bool sdebug_any_injecting_opt;
668static bool sdebug_verbose; 670static bool sdebug_verbose;
669static bool have_dif_prot; 671static bool have_dif_prot;
672static bool write_since_sync;
670static bool sdebug_statistics = DEF_STATISTICS; 673static bool sdebug_statistics = DEF_STATISTICS;
671 674
672static unsigned int sdebug_store_sectors; 675static unsigned int sdebug_store_sectors;
@@ -1607,6 +1610,7 @@ static int resp_start_stop(struct scsi_cmnd *scp,
1607{ 1610{
1608 unsigned char *cmd = scp->cmnd; 1611 unsigned char *cmd = scp->cmnd;
1609 int power_cond, stop; 1612 int power_cond, stop;
1613 bool changing;
1610 1614
1611 power_cond = (cmd[4] & 0xf0) >> 4; 1615 power_cond = (cmd[4] & 0xf0) >> 4;
1612 if (power_cond) { 1616 if (power_cond) {
@@ -1614,8 +1618,12 @@ static int resp_start_stop(struct scsi_cmnd *scp,
1614 return check_condition_result; 1618 return check_condition_result;
1615 } 1619 }
1616 stop = !(cmd[4] & 1); 1620 stop = !(cmd[4] & 1);
1621 changing = atomic_read(&devip->stopped) == !stop;
1617 atomic_xchg(&devip->stopped, stop); 1622 atomic_xchg(&devip->stopped, stop);
1618 return (cmd[1] & 0x1) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */ 1623 if (!changing || cmd[1] & 0x1) /* state unchanged or IMMED set */
1624 return SDEG_RES_IMMED_MASK;
1625 else
1626 return 0;
1619} 1627}
1620 1628
1621static sector_t get_sdebug_capacity(void) 1629static sector_t get_sdebug_capacity(void)
@@ -2473,6 +2481,7 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
2473 if (do_write) { 2481 if (do_write) {
2474 sdb = scsi_out(scmd); 2482 sdb = scsi_out(scmd);
2475 dir = DMA_TO_DEVICE; 2483 dir = DMA_TO_DEVICE;
2484 write_since_sync = true;
2476 } else { 2485 } else {
2477 sdb = scsi_in(scmd); 2486 sdb = scsi_in(scmd);
2478 dir = DMA_FROM_DEVICE; 2487 dir = DMA_FROM_DEVICE;
@@ -3583,6 +3592,7 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
3583static int resp_sync_cache(struct scsi_cmnd *scp, 3592static int resp_sync_cache(struct scsi_cmnd *scp,
3584 struct sdebug_dev_info *devip) 3593 struct sdebug_dev_info *devip)
3585{ 3594{
3595 int res = 0;
3586 u64 lba; 3596 u64 lba;
3587 u32 num_blocks; 3597 u32 num_blocks;
3588 u8 *cmd = scp->cmnd; 3598 u8 *cmd = scp->cmnd;
@@ -3598,7 +3608,11 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
3598 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); 3608 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3599 return check_condition_result; 3609 return check_condition_result;
3600 } 3610 }
3601 return (cmd[1] & 0x2) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */ 3611 if (!write_since_sync || cmd[1] & 0x2)
3612 res = SDEG_RES_IMMED_MASK;
3613 else /* delay if write_since_sync and IMMED clear */
3614 write_since_sync = false;
3615 return res;
3602} 3616}
3603 3617
3604#define RL_BUCKET_ELEMS 8 3618#define RL_BUCKET_ELEMS 8
@@ -5777,13 +5791,14 @@ fini:
5777 return schedule_resp(scp, devip, errsts, pfp, 0, 0); 5791 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
5778 else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) { 5792 else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) {
5779 /* 5793 /*
5780 * If any delay is active, want F_LONG_DELAY to be at least 1 5794 * If any delay is active, for F_SSU_DELAY want at least 1
5781 * second and if sdebug_jdelay>0 want a long delay of that 5795 * second and if sdebug_jdelay>0 want a long delay of that
5782 * many seconds. 5796 * many seconds; for F_SYNC_DELAY want 1/20 of that.
5783 */ 5797 */
5784 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay; 5798 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
5799 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
5785 5800
5786 jdelay = mult_frac(USER_HZ * jdelay, HZ, USER_HZ); 5801 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
5787 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0); 5802 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
5788 } else 5803 } else
5789 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay, 5804 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f4b52b44b966..65f6c94f2e9b 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2322,6 +2322,12 @@ iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
2322 return nlmsg_multicast(nls, skb, 0, group, gfp); 2322 return nlmsg_multicast(nls, skb, 0, group, gfp);
2323} 2323}
2324 2324
2325static int
2326iscsi_unicast_skb(struct sk_buff *skb, u32 portid)
2327{
2328 return nlmsg_unicast(nls, skb, portid);
2329}
2330
2325int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 2331int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
2326 char *data, uint32_t data_size) 2332 char *data, uint32_t data_size)
2327{ 2333{
@@ -2524,14 +2530,11 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
2524EXPORT_SYMBOL_GPL(iscsi_ping_comp_event); 2530EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
2525 2531
2526static int 2532static int
2527iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi, 2533iscsi_if_send_reply(u32 portid, int type, void *payload, int size)
2528 void *payload, int size)
2529{ 2534{
2530 struct sk_buff *skb; 2535 struct sk_buff *skb;
2531 struct nlmsghdr *nlh; 2536 struct nlmsghdr *nlh;
2532 int len = nlmsg_total_size(size); 2537 int len = nlmsg_total_size(size);
2533 int flags = multi ? NLM_F_MULTI : 0;
2534 int t = done ? NLMSG_DONE : type;
2535 2538
2536 skb = alloc_skb(len, GFP_ATOMIC); 2539 skb = alloc_skb(len, GFP_ATOMIC);
2537 if (!skb) { 2540 if (!skb) {
@@ -2539,10 +2542,9 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
2539 return -ENOMEM; 2542 return -ENOMEM;
2540 } 2543 }
2541 2544
2542 nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0); 2545 nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0);
2543 nlh->nlmsg_flags = flags;
2544 memcpy(nlmsg_data(nlh), payload, size); 2546 memcpy(nlmsg_data(nlh), payload, size);
2545 return iscsi_multicast_skb(skb, group, GFP_ATOMIC); 2547 return iscsi_unicast_skb(skb, portid);
2546} 2548}
2547 2549
2548static int 2550static int
@@ -3470,6 +3472,7 @@ static int
3470iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) 3472iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3471{ 3473{
3472 int err = 0; 3474 int err = 0;
3475 u32 portid;
3473 struct iscsi_uevent *ev = nlmsg_data(nlh); 3476 struct iscsi_uevent *ev = nlmsg_data(nlh);
3474 struct iscsi_transport *transport = NULL; 3477 struct iscsi_transport *transport = NULL;
3475 struct iscsi_internal *priv; 3478 struct iscsi_internal *priv;
@@ -3490,10 +3493,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3490 if (!try_module_get(transport->owner)) 3493 if (!try_module_get(transport->owner))
3491 return -EINVAL; 3494 return -EINVAL;
3492 3495
3496 portid = NETLINK_CB(skb).portid;
3497
3493 switch (nlh->nlmsg_type) { 3498 switch (nlh->nlmsg_type) {
3494 case ISCSI_UEVENT_CREATE_SESSION: 3499 case ISCSI_UEVENT_CREATE_SESSION:
3495 err = iscsi_if_create_session(priv, ep, ev, 3500 err = iscsi_if_create_session(priv, ep, ev,
3496 NETLINK_CB(skb).portid, 3501 portid,
3497 ev->u.c_session.initial_cmdsn, 3502 ev->u.c_session.initial_cmdsn,
3498 ev->u.c_session.cmds_max, 3503 ev->u.c_session.cmds_max,
3499 ev->u.c_session.queue_depth); 3504 ev->u.c_session.queue_depth);
@@ -3506,7 +3511,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3506 } 3511 }
3507 3512
3508 err = iscsi_if_create_session(priv, ep, ev, 3513 err = iscsi_if_create_session(priv, ep, ev,
3509 NETLINK_CB(skb).portid, 3514 portid,
3510 ev->u.c_bound_session.initial_cmdsn, 3515 ev->u.c_bound_session.initial_cmdsn,
3511 ev->u.c_bound_session.cmds_max, 3516 ev->u.c_bound_session.cmds_max,
3512 ev->u.c_bound_session.queue_depth); 3517 ev->u.c_bound_session.queue_depth);
@@ -3664,6 +3669,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3664static void 3669static void
3665iscsi_if_rx(struct sk_buff *skb) 3670iscsi_if_rx(struct sk_buff *skb)
3666{ 3671{
3672 u32 portid = NETLINK_CB(skb).portid;
3673
3667 mutex_lock(&rx_queue_mutex); 3674 mutex_lock(&rx_queue_mutex);
3668 while (skb->len >= NLMSG_HDRLEN) { 3675 while (skb->len >= NLMSG_HDRLEN) {
3669 int err; 3676 int err;
@@ -3699,8 +3706,8 @@ iscsi_if_rx(struct sk_buff *skb)
3699 break; 3706 break;
3700 if (ev->type == ISCSI_UEVENT_GET_CHAP && !err) 3707 if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
3701 break; 3708 break;
3702 err = iscsi_if_send_reply(group, nlh->nlmsg_seq, 3709 err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
3703 nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); 3710 ev, sizeof(*ev));
3704 } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); 3711 } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
3705 skb_pull(skb, rlen); 3712 skb_pull(skb, rlen);
3706 } 3713 }
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a6201e696ab9..9421d9877730 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2121,6 +2121,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
2121 break; /* standby */ 2121 break; /* standby */
2122 if (sshdr.asc == 4 && sshdr.ascq == 0xc) 2122 if (sshdr.asc == 4 && sshdr.ascq == 0xc)
2123 break; /* unavailable */ 2123 break; /* unavailable */
2124 if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
2125 break; /* sanitize in progress */
2124 /* 2126 /*
2125 * Issue command to spin up drive when not ready 2127 * Issue command to spin up drive when not ready
2126 */ 2128 */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 41df75eea57b..210407cd2341 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -400,8 +400,10 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
400 * 400 *
401 * Check that all zones of the device are equal. The last zone can however 401 * Check that all zones of the device are equal. The last zone can however
402 * be smaller. The zone size must also be a power of two number of LBAs. 402 * be smaller. The zone size must also be a power of two number of LBAs.
403 *
404 * Returns the zone size in bytes upon success or an error code upon failure.
403 */ 405 */
404static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) 406static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
405{ 407{
406 u64 zone_blocks = 0; 408 u64 zone_blocks = 0;
407 sector_t block = 0; 409 sector_t block = 0;
@@ -412,8 +414,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
412 int ret; 414 int ret;
413 u8 same; 415 u8 same;
414 416
415 sdkp->zone_blocks = 0;
416
417 /* Get a buffer */ 417 /* Get a buffer */
418 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL); 418 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
419 if (!buf) 419 if (!buf)
@@ -445,16 +445,17 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
445 445
446 /* Parse zone descriptors */ 446 /* Parse zone descriptors */
447 while (rec < buf + buf_len) { 447 while (rec < buf + buf_len) {
448 zone_blocks = get_unaligned_be64(&rec[8]); 448 u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
449 if (sdkp->zone_blocks == 0) { 449
450 sdkp->zone_blocks = zone_blocks; 450 if (zone_blocks == 0) {
451 } else if (zone_blocks != sdkp->zone_blocks && 451 zone_blocks = this_zone_blocks;
452 (block + zone_blocks < sdkp->capacity 452 } else if (this_zone_blocks != zone_blocks &&
453 || zone_blocks > sdkp->zone_blocks)) { 453 (block + this_zone_blocks < sdkp->capacity
454 zone_blocks = 0; 454 || this_zone_blocks > zone_blocks)) {
455 this_zone_blocks = 0;
455 goto out; 456 goto out;
456 } 457 }
457 block += zone_blocks; 458 block += this_zone_blocks;
458 rec += 64; 459 rec += 64;
459 } 460 }
460 461
@@ -467,8 +468,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
467 468
468 } while (block < sdkp->capacity); 469 } while (block < sdkp->capacity);
469 470
470 zone_blocks = sdkp->zone_blocks;
471
472out: 471out:
473 if (!zone_blocks) { 472 if (!zone_blocks) {
474 if (sdkp->first_scan) 473 if (sdkp->first_scan)
@@ -488,8 +487,7 @@ out:
488 "Zone size too large\n"); 487 "Zone size too large\n");
489 ret = -ENODEV; 488 ret = -ENODEV;
490 } else { 489 } else {
491 sdkp->zone_blocks = zone_blocks; 490 ret = zone_blocks;
492 sdkp->zone_shift = ilog2(zone_blocks);
493 } 491 }
494 492
495out_free: 493out_free:
@@ -500,15 +498,14 @@ out_free:
500 498
501/** 499/**
502 * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone). 500 * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
503 * @sdkp: The disk of the bitmap 501 * @nr_zones: Number of zones to allocate space for.
502 * @numa_node: NUMA node to allocate the memory from.
504 */ 503 */
505static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp) 504static inline unsigned long *
505sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
506{ 506{
507 struct request_queue *q = sdkp->disk->queue; 507 return kzalloc_node(BITS_TO_LONGS(nr_zones) * sizeof(unsigned long),
508 508 GFP_KERNEL, numa_node);
509 return kzalloc_node(BITS_TO_LONGS(sdkp->nr_zones)
510 * sizeof(unsigned long),
511 GFP_KERNEL, q->node);
512} 509}
513 510
514/** 511/**
@@ -516,6 +513,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
516 * @sdkp: disk used 513 * @sdkp: disk used
517 * @buf: report reply buffer 514 * @buf: report reply buffer
518 * @buflen: length of @buf 515 * @buflen: length of @buf
516 * @zone_shift: logarithm base 2 of the number of blocks in a zone
519 * @seq_zones_bitmap: bitmap of sequential zones to set 517 * @seq_zones_bitmap: bitmap of sequential zones to set
520 * 518 *
521 * Parse reported zone descriptors in @buf to identify sequential zones and 519 * Parse reported zone descriptors in @buf to identify sequential zones and
@@ -525,7 +523,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
525 * Return the LBA after the last zone reported. 523 * Return the LBA after the last zone reported.
526 */ 524 */
527static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf, 525static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
528 unsigned int buflen, 526 unsigned int buflen, u32 zone_shift,
529 unsigned long *seq_zones_bitmap) 527 unsigned long *seq_zones_bitmap)
530{ 528{
531 sector_t lba, next_lba = sdkp->capacity; 529 sector_t lba, next_lba = sdkp->capacity;
@@ -544,7 +542,7 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
544 if (type != ZBC_ZONE_TYPE_CONV && 542 if (type != ZBC_ZONE_TYPE_CONV &&
545 cond != ZBC_ZONE_COND_READONLY && 543 cond != ZBC_ZONE_COND_READONLY &&
546 cond != ZBC_ZONE_COND_OFFLINE) 544 cond != ZBC_ZONE_COND_OFFLINE)
547 set_bit(lba >> sdkp->zone_shift, seq_zones_bitmap); 545 set_bit(lba >> zone_shift, seq_zones_bitmap);
548 next_lba = lba + get_unaligned_be64(&rec[8]); 546 next_lba = lba + get_unaligned_be64(&rec[8]);
549 rec += 64; 547 rec += 64;
550 } 548 }
@@ -553,12 +551,16 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
553} 551}
554 552
555/** 553/**
556 * sd_zbc_setup_seq_zones_bitmap - Initialize the disk seq zone bitmap. 554 * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
557 * @sdkp: target disk 555 * @sdkp: target disk
556 * @zone_shift: logarithm base 2 of the number of blocks in a zone
557 * @nr_zones: number of zones to set up a seq zone bitmap for
558 * 558 *
559 * Allocate a zone bitmap and initialize it by identifying sequential zones. 559 * Allocate a zone bitmap and initialize it by identifying sequential zones.
560 */ 560 */
561static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp) 561static unsigned long *
562sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
563 u32 nr_zones)
562{ 564{
563 struct request_queue *q = sdkp->disk->queue; 565 struct request_queue *q = sdkp->disk->queue;
564 unsigned long *seq_zones_bitmap; 566 unsigned long *seq_zones_bitmap;
@@ -566,9 +568,9 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
566 unsigned char *buf; 568 unsigned char *buf;
567 int ret = -ENOMEM; 569 int ret = -ENOMEM;
568 570
569 seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(sdkp); 571 seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
570 if (!seq_zones_bitmap) 572 if (!seq_zones_bitmap)
571 return -ENOMEM; 573 return ERR_PTR(-ENOMEM);
572 574
573 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL); 575 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
574 if (!buf) 576 if (!buf)
@@ -579,7 +581,7 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
579 if (ret) 581 if (ret)
580 goto out; 582 goto out;
581 lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 583 lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
582 seq_zones_bitmap); 584 zone_shift, seq_zones_bitmap);
583 } 585 }
584 586
585 if (lba != sdkp->capacity) { 587 if (lba != sdkp->capacity) {
@@ -591,12 +593,9 @@ out:
591 kfree(buf); 593 kfree(buf);
592 if (ret) { 594 if (ret) {
593 kfree(seq_zones_bitmap); 595 kfree(seq_zones_bitmap);
594 return ret; 596 return ERR_PTR(ret);
595 } 597 }
596 598 return seq_zones_bitmap;
597 q->seq_zones_bitmap = seq_zones_bitmap;
598
599 return 0;
600} 599}
601 600
602static void sd_zbc_cleanup(struct scsi_disk *sdkp) 601static void sd_zbc_cleanup(struct scsi_disk *sdkp)
@@ -612,44 +611,64 @@ static void sd_zbc_cleanup(struct scsi_disk *sdkp)
612 q->nr_zones = 0; 611 q->nr_zones = 0;
613} 612}
614 613
615static int sd_zbc_setup(struct scsi_disk *sdkp) 614static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
616{ 615{
617 struct request_queue *q = sdkp->disk->queue; 616 struct request_queue *q = sdkp->disk->queue;
617 u32 zone_shift = ilog2(zone_blocks);
618 u32 nr_zones;
618 int ret; 619 int ret;
619 620
620 /* READ16/WRITE16 is mandatory for ZBC disks */
621 sdkp->device->use_16_for_rw = 1;
622 sdkp->device->use_10_for_rw = 0;
623
624 /* chunk_sectors indicates the zone size */ 621 /* chunk_sectors indicates the zone size */
625 blk_queue_chunk_sectors(sdkp->disk->queue, 622 blk_queue_chunk_sectors(q,
626 logical_to_sectors(sdkp->device, sdkp->zone_blocks)); 623 logical_to_sectors(sdkp->device, zone_blocks));
627 sdkp->nr_zones = 624 nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
628 round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
629 625
630 /* 626 /*
631 * Initialize the device request queue information if the number 627 * Initialize the device request queue information if the number
632 * of zones changed. 628 * of zones changed.
633 */ 629 */
634 if (sdkp->nr_zones != q->nr_zones) { 630 if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
635 631 unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
636 sd_zbc_cleanup(sdkp); 632 size_t zone_bitmap_size;
637 633
638 q->nr_zones = sdkp->nr_zones; 634 if (nr_zones) {
639 if (sdkp->nr_zones) { 635 seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
640 q->seq_zones_wlock = sd_zbc_alloc_zone_bitmap(sdkp); 636 q->node);
641 if (!q->seq_zones_wlock) { 637 if (!seq_zones_wlock) {
642 ret = -ENOMEM; 638 ret = -ENOMEM;
643 goto err; 639 goto err;
644 } 640 }
645 641
646 ret = sd_zbc_setup_seq_zones_bitmap(sdkp); 642 seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
647 if (ret) { 643 zone_shift, nr_zones);
648 sd_zbc_cleanup(sdkp); 644 if (IS_ERR(seq_zones_bitmap)) {
645 ret = PTR_ERR(seq_zones_bitmap);
646 kfree(seq_zones_wlock);
649 goto err; 647 goto err;
650 } 648 }
651 } 649 }
652 650 zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
651 sizeof(unsigned long);
652 blk_mq_freeze_queue(q);
653 if (q->nr_zones != nr_zones) {
654 /* READ16/WRITE16 is mandatory for ZBC disks */
655 sdkp->device->use_16_for_rw = 1;
656 sdkp->device->use_10_for_rw = 0;
657
658 sdkp->zone_blocks = zone_blocks;
659 sdkp->zone_shift = zone_shift;
660 sdkp->nr_zones = nr_zones;
661 q->nr_zones = nr_zones;
662 swap(q->seq_zones_wlock, seq_zones_wlock);
663 swap(q->seq_zones_bitmap, seq_zones_bitmap);
664 } else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
665 zone_bitmap_size) != 0) {
666 memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
667 zone_bitmap_size);
668 }
669 blk_mq_unfreeze_queue(q);
670 kfree(seq_zones_wlock);
671 kfree(seq_zones_bitmap);
653 } 672 }
654 673
655 return 0; 674 return 0;
@@ -661,6 +680,7 @@ err:
661 680
662int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) 681int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
663{ 682{
683 int64_t zone_blocks;
664 int ret; 684 int ret;
665 685
666 if (!sd_is_zoned(sdkp)) 686 if (!sd_is_zoned(sdkp))
@@ -697,12 +717,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
697 * Check zone size: only devices with a constant zone size (except 717 * Check zone size: only devices with a constant zone size (except
698 * an eventual last runt zone) that is a power of 2 are supported. 718 * an eventual last runt zone) that is a power of 2 are supported.
699 */ 719 */
700 ret = sd_zbc_check_zone_size(sdkp); 720 zone_blocks = sd_zbc_check_zone_size(sdkp);
701 if (ret) 721 ret = -EFBIG;
722 if (zone_blocks != (u32)zone_blocks)
723 goto err;
724 ret = zone_blocks;
725 if (ret < 0)
702 goto err; 726 goto err;
703 727
704 /* The drive satisfies the kernel restrictions: set it up */ 728 /* The drive satisfies the kernel restrictions: set it up */
705 ret = sd_zbc_setup(sdkp); 729 ret = sd_zbc_setup(sdkp, zone_blocks);
706 if (ret) 730 if (ret)
707 goto err; 731 goto err;
708 732
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index c5b1bf1cadcb..00e79057f870 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -276,6 +276,35 @@ static inline void ufshcd_remove_non_printable(char *val)
276 *val = ' '; 276 *val = ' ';
277} 277}
278 278
279static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
280 const char *str)
281{
282 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
283
284 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
285}
286
287static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
288 const char *str)
289{
290 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
291
292 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
293}
294
295static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
296 const char *str)
297{
298 struct utp_task_req_desc *descp;
299 struct utp_upiu_task_req *task_req;
300 int off = (int)tag - hba->nutrs;
301
302 descp = &hba->utmrdl_base_addr[off];
303 task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
304 trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
305 &task_req->input_param1);
306}
307
279static void ufshcd_add_command_trace(struct ufs_hba *hba, 308static void ufshcd_add_command_trace(struct ufs_hba *hba,
280 unsigned int tag, const char *str) 309 unsigned int tag, const char *str)
281{ 310{
@@ -285,6 +314,9 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
285 struct ufshcd_lrb *lrbp; 314 struct ufshcd_lrb *lrbp;
286 int transfer_len = -1; 315 int transfer_len = -1;
287 316
317 /* trace UPIU also */
318 ufshcd_add_cmd_upiu_trace(hba, tag, str);
319
288 if (!trace_ufshcd_command_enabled()) 320 if (!trace_ufshcd_command_enabled())
289 return; 321 return;
290 322
@@ -2550,6 +2582,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2550 2582
2551 hba->dev_cmd.complete = &wait; 2583 hba->dev_cmd.complete = &wait;
2552 2584
2585 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2553 /* Make sure descriptors are ready before ringing the doorbell */ 2586 /* Make sure descriptors are ready before ringing the doorbell */
2554 wmb(); 2587 wmb();
2555 spin_lock_irqsave(hba->host->host_lock, flags); 2588 spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2559,6 +2592,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2559 2592
2560 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); 2593 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2561 2594
2595 ufshcd_add_query_upiu_trace(hba, tag,
2596 err ? "query_complete_err" : "query_complete");
2597
2562out_put_tag: 2598out_put_tag:
2563 ufshcd_put_dev_cmd_tag(hba, tag); 2599 ufshcd_put_dev_cmd_tag(hba, tag);
2564 wake_up(&hba->dev_cmd.tag_wq); 2600 wake_up(&hba->dev_cmd.tag_wq);
@@ -5443,11 +5479,14 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5443 5479
5444 spin_unlock_irqrestore(host->host_lock, flags); 5480 spin_unlock_irqrestore(host->host_lock, flags);
5445 5481
5482 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
5483
5446 /* wait until the task management command is completed */ 5484 /* wait until the task management command is completed */
5447 err = wait_event_timeout(hba->tm_wq, 5485 err = wait_event_timeout(hba->tm_wq,
5448 test_bit(free_slot, &hba->tm_condition), 5486 test_bit(free_slot, &hba->tm_condition),
5449 msecs_to_jiffies(TM_CMD_TIMEOUT)); 5487 msecs_to_jiffies(TM_CMD_TIMEOUT));
5450 if (!err) { 5488 if (!err) {
5489 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
5451 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", 5490 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5452 __func__, tm_function); 5491 __func__, tm_function);
5453 if (ufshcd_clear_tm_cmd(hba, free_slot)) 5492 if (ufshcd_clear_tm_cmd(hba, free_slot))
@@ -5456,6 +5495,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5456 err = -ETIMEDOUT; 5495 err = -ETIMEDOUT;
5457 } else { 5496 } else {
5458 err = ufshcd_task_req_compl(hba, free_slot, tm_response); 5497 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
5498 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
5459 } 5499 }
5460 5500
5461 clear_bit(free_slot, &hba->tm_condition); 5501 clear_bit(free_slot, &hba->tm_condition);
diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c
index fe96a8b956fb..f7ed1187518b 100644
--- a/drivers/soc/bcm/raspberrypi-power.c
+++ b/drivers/soc/bcm/raspberrypi-power.c
@@ -45,7 +45,7 @@ struct rpi_power_domains {
45struct rpi_power_domain_packet { 45struct rpi_power_domain_packet {
46 u32 domain; 46 u32 domain;
47 u32 on; 47 u32 on;
48} __packet; 48};
49 49
50/* 50/*
51 * Asks the firmware to enable or disable power on a specific power 51 * Asks the firmware to enable or disable power on a specific power
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 0d99b242e82e..6cb933ecc084 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -890,6 +890,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
890 bytes = min(bytes, data_len); 890 bytes = min(bytes, data_len);
891 891
892 if (!bio) { 892 if (!bio) {
893new_bio:
893 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); 894 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
894 nr_pages -= nr_vecs; 895 nr_pages -= nr_vecs;
895 /* 896 /*
@@ -931,6 +932,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
931 * be allocated with pscsi_get_bio() above. 932 * be allocated with pscsi_get_bio() above.
932 */ 933 */
933 bio = NULL; 934 bio = NULL;
935 goto new_bio;
934 } 936 }
935 937
936 data_len -= bytes; 938 data_len -= bytes;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e3986f4b3461..ebc34a5686dc 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -9,6 +9,9 @@
9struct blk_mq_tags; 9struct blk_mq_tags;
10struct blk_flush_queue; 10struct blk_flush_queue;
11 11
12/**
13 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
14 */
12struct blk_mq_hw_ctx { 15struct blk_mq_hw_ctx {
13 struct { 16 struct {
14 spinlock_t lock; 17 spinlock_t lock;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9af3e0f430bc..5c4eee043191 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -605,6 +605,11 @@ struct request_queue {
605 * initialized by the low level device driver (e.g. scsi/sd.c). 605 * initialized by the low level device driver (e.g. scsi/sd.c).
606 * Stacking drivers (device mappers) may or may not initialize 606 * Stacking drivers (device mappers) may or may not initialize
607 * these fields. 607 * these fields.
608 *
609 * Reads of this information must be protected with blk_queue_enter() /
610 * blk_queue_exit(). Modifying this information is only allowed while
611 * no requests are being processed. See also blk_mq_freeze_queue() and
612 * blk_mq_unfreeze_queue().
608 */ 613 */
609 unsigned int nr_zones; 614 unsigned int nr_zones;
610 unsigned long *seq_zones_bitmap; 615 unsigned long *seq_zones_bitmap;
@@ -737,6 +742,7 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
737#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags) 742#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
738#define blk_queue_preempt_only(q) \ 743#define blk_queue_preempt_only(q) \
739 test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags) 744 test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
745#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
740 746
741extern int blk_set_preempt_only(struct request_queue *q); 747extern int blk_set_preempt_only(struct request_queue *q);
742extern void blk_clear_preempt_only(struct request_queue *q); 748extern void blk_clear_preempt_only(struct request_queue *q);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 95a7abd0ee92..486e65e3db26 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -339,8 +339,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
339void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, 339void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
340 struct bpf_prog *old_prog); 340 struct bpf_prog *old_prog);
341int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, 341int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
342 __u32 __user *prog_ids, u32 request_cnt, 342 u32 *prog_ids, u32 request_cnt,
343 __u32 __user *prog_cnt); 343 u32 *prog_cnt);
344int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, 344int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
345 struct bpf_prog *exclude_prog, 345 struct bpf_prog *exclude_prog,
346 struct bpf_prog *include_prog, 346 struct bpf_prog *include_prog,
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index ebe41811ed34..b32cd2062f18 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -310,6 +310,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
310 * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS 310 * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
311 * instead of the latter), any change to them will be overwritten 311 * instead of the latter), any change to them will be overwritten
312 * by kernel. Returns a negative error code or zero. 312 * by kernel. Returns a negative error code or zero.
313 * @get_fecparam: Get the network device Forward Error Correction parameters.
314 * @set_fecparam: Set the network device Forward Error Correction parameters.
313 * 315 *
314 * All operations are optional (i.e. the function pointer may be set 316 * All operations are optional (i.e. the function pointer may be set
315 * to %NULL) and callers must take this into account. Callers must 317 * to %NULL) and callers must take this into account. Callers must
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index e0c95c9f1e29..e64c0294f50b 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -217,12 +217,10 @@ struct fsnotify_mark_connector {
217 union { /* Object pointer [lock] */ 217 union { /* Object pointer [lock] */
218 struct inode *inode; 218 struct inode *inode;
219 struct vfsmount *mnt; 219 struct vfsmount *mnt;
220 };
221 union {
222 struct hlist_head list;
223 /* Used listing heads to free after srcu period expires */ 220 /* Used listing heads to free after srcu period expires */
224 struct fsnotify_mark_connector *destroy_next; 221 struct fsnotify_mark_connector *destroy_next;
225 }; 222 };
223 struct hlist_head list;
226}; 224};
227 225
228/* 226/*
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index b63fa457febd..3529683f691e 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -85,6 +85,7 @@ struct flchip {
85 unsigned int write_suspended:1; 85 unsigned int write_suspended:1;
86 unsigned int erase_suspended:1; 86 unsigned int erase_suspended:1;
87 unsigned long in_progress_block_addr; 87 unsigned long in_progress_block_addr;
88 unsigned long in_progress_block_mask;
88 89
89 struct mutex mutex; 90 struct mutex mutex;
90 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip 91 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
diff --git a/include/linux/ti-emif-sram.h b/include/linux/ti-emif-sram.h
index 45bc6b376492..53604b087f2c 100644
--- a/include/linux/ti-emif-sram.h
+++ b/include/linux/ti-emif-sram.h
@@ -60,6 +60,81 @@ struct ti_emif_pm_functions {
60 u32 abort_sr; 60 u32 abort_sr;
61} __packed __aligned(8); 61} __packed __aligned(8);
62 62
63static inline void ti_emif_asm_offsets(void)
64{
65 DEFINE(EMIF_SDCFG_VAL_OFFSET,
66 offsetof(struct emif_regs_amx3, emif_sdcfg_val));
67 DEFINE(EMIF_TIMING1_VAL_OFFSET,
68 offsetof(struct emif_regs_amx3, emif_timing1_val));
69 DEFINE(EMIF_TIMING2_VAL_OFFSET,
70 offsetof(struct emif_regs_amx3, emif_timing2_val));
71 DEFINE(EMIF_TIMING3_VAL_OFFSET,
72 offsetof(struct emif_regs_amx3, emif_timing3_val));
73 DEFINE(EMIF_REF_CTRL_VAL_OFFSET,
74 offsetof(struct emif_regs_amx3, emif_ref_ctrl_val));
75 DEFINE(EMIF_ZQCFG_VAL_OFFSET,
76 offsetof(struct emif_regs_amx3, emif_zqcfg_val));
77 DEFINE(EMIF_PMCR_VAL_OFFSET,
78 offsetof(struct emif_regs_amx3, emif_pmcr_val));
79 DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET,
80 offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val));
81 DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET,
82 offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl));
83 DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET,
84 offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh));
85 DEFINE(EMIF_COS_CONFIG_OFFSET,
86 offsetof(struct emif_regs_amx3, emif_cos_config));
87 DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET,
88 offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping));
89 DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET,
90 offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map));
91 DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET,
92 offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map));
93 DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET,
94 offsetof(struct emif_regs_amx3, emif_ocp_config_val));
95 DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET,
96 offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim));
97 DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET,
98 offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw));
99 DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET,
100 offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val));
101 DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET,
102 offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw));
103 DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET,
104 offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1));
105 DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET,
106 offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals));
107 DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3));
108
109 BLANK();
110
111 DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET,
112 offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt));
113 DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET,
114 offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys));
115 DEFINE(EMIF_PM_CONFIG_OFFSET,
116 offsetof(struct ti_emif_pm_data, ti_emif_sram_config));
117 DEFINE(EMIF_PM_REGS_VIRT_OFFSET,
118 offsetof(struct ti_emif_pm_data, regs_virt));
119 DEFINE(EMIF_PM_REGS_PHYS_OFFSET,
120 offsetof(struct ti_emif_pm_data, regs_phys));
121 DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data));
122
123 BLANK();
124
125 DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET,
126 offsetof(struct ti_emif_pm_functions, save_context));
127 DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET,
128 offsetof(struct ti_emif_pm_functions, restore_context));
129 DEFINE(EMIF_PM_ENTER_SR_OFFSET,
130 offsetof(struct ti_emif_pm_functions, enter_sr));
131 DEFINE(EMIF_PM_EXIT_SR_OFFSET,
132 offsetof(struct ti_emif_pm_functions, exit_sr));
133 DEFINE(EMIF_PM_ABORT_SR_OFFSET,
134 offsetof(struct ti_emif_pm_functions, abort_sr));
135 DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions));
136}
137
63struct gen_pool; 138struct gen_pool;
64 139
65int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst); 140int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst);
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 988c7355bc22..fa1b5da2804e 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -157,6 +157,9 @@ int virtio_device_freeze(struct virtio_device *dev);
157int virtio_device_restore(struct virtio_device *dev); 157int virtio_device_restore(struct virtio_device *dev);
158#endif 158#endif
159 159
160#define virtio_device_for_each_vq(vdev, vq) \
161 list_for_each_entry(vq, &vdev->vqs, list)
162
160/** 163/**
161 * virtio_driver - operations for a virtio I/O driver 164 * virtio_driver - operations for a virtio I/O driver
162 * @driver: underlying device driver (populate name and owner). 165 * @driver: underlying device driver (populate name and owner).
diff --git a/include/net/ife.h b/include/net/ife.h
index 44b9c00f7223..e117617e3c34 100644
--- a/include/net/ife.h
+++ b/include/net/ife.h
@@ -12,7 +12,8 @@
12void *ife_encode(struct sk_buff *skb, u16 metalen); 12void *ife_encode(struct sk_buff *skb, u16 metalen);
13void *ife_decode(struct sk_buff *skb, u16 *metalen); 13void *ife_decode(struct sk_buff *skb, u16 *metalen);
14 14
15void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen); 15void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
16 u16 *dlen, u16 *totlen);
16int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, 17int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
17 const void *dval); 18 const void *dval);
18 19
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index 5c40f118c0fa..df528a623548 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
97 97
98struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, 98struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
99 struct proto *prot, int kern); 99 struct proto *prot, int kern);
100void llc_sk_stop_all_timers(struct sock *sk, bool sync);
100void llc_sk_free(struct sock *sk); 101void llc_sk_free(struct sock *sk);
101 102
102void llc_sk_reset(struct sock *sk); 103void llc_sk_reset(struct sock *sk);
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index 04e0679767f6..e03bd9d41fa8 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -11,8 +11,6 @@ struct scsi_sense_hdr;
11extern void scsi_print_command(struct scsi_cmnd *); 11extern void scsi_print_command(struct scsi_cmnd *);
12extern size_t __scsi_format_command(char *, size_t, 12extern size_t __scsi_format_command(char *, size_t,
13 const unsigned char *, size_t); 13 const unsigned char *, size_t);
14extern void scsi_show_extd_sense(const struct scsi_device *, const char *,
15 unsigned char, unsigned char);
16extern void scsi_print_sense_hdr(const struct scsi_device *, const char *, 14extern void scsi_print_sense_hdr(const struct scsi_device *, const char *,
17 const struct scsi_sense_hdr *); 15 const struct scsi_sense_hdr *);
18extern void scsi_print_sense(const struct scsi_cmnd *); 16extern void scsi_print_sense(const struct scsi_cmnd *);
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 50df5b28d2c9..8ee8991aa099 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -143,13 +143,13 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
143static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag, 143static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
144 void *data, size_t len) 144 void *data, size_t len)
145{ 145{
146 return 0; 146 return -ENOSYS;
147} 147}
148 148
149static inline int rpi_firmware_property_list(struct rpi_firmware *fw, 149static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
150 void *data, size_t tag_size) 150 void *data, size_t tag_size)
151{ 151{
152 return 0; 152 return -ENOSYS;
153} 153}
154 154
155static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node) 155static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
index bf6f82673492..f8260e5c79ad 100644
--- a/include/trace/events/ufs.h
+++ b/include/trace/events/ufs.h
@@ -257,6 +257,33 @@ TRACE_EVENT(ufshcd_command,
257 ) 257 )
258); 258);
259 259
260TRACE_EVENT(ufshcd_upiu,
261 TP_PROTO(const char *dev_name, const char *str, void *hdr, void *tsf),
262
263 TP_ARGS(dev_name, str, hdr, tsf),
264
265 TP_STRUCT__entry(
266 __string(dev_name, dev_name)
267 __string(str, str)
268 __array(unsigned char, hdr, 12)
269 __array(unsigned char, tsf, 16)
270 ),
271
272 TP_fast_assign(
273 __assign_str(dev_name, dev_name);
274 __assign_str(str, str);
275 memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
276 memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
277 ),
278
279 TP_printk(
280 "%s: %s: HDR:%s, CDB:%s",
281 __get_str(str), __get_str(dev_name),
282 __print_hex(__entry->hdr, sizeof(__entry->hdr)),
283 __print_hex(__entry->tsf, sizeof(__entry->tsf))
284 )
285);
286
260#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */ 287#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
261 288
262/* This part must be outside protection */ 289/* This part must be outside protection */
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 2f057a494d93..9a761bc6a251 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -25,6 +25,8 @@ DECLARE_EVENT_CLASS(workqueue_work,
25 TP_printk("work struct %p", __entry->work) 25 TP_printk("work struct %p", __entry->work)
26); 26);
27 27
28struct pool_workqueue;
29
28/** 30/**
29 * workqueue_queue_work - called when a work gets queued 31 * workqueue_queue_work - called when a work gets queued
30 * @req_cpu: the requested cpu 32 * @req_cpu: the requested cpu
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 40297a3181ed..13b8cb563892 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -57,6 +57,21 @@ struct virtio_balloon_config {
57#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */ 57#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */
58#define VIRTIO_BALLOON_S_NR 10 58#define VIRTIO_BALLOON_S_NR 10
59 59
60#define VIRTIO_BALLOON_S_NAMES_WITH_PREFIX(VIRTIO_BALLOON_S_NAMES_prefix) { \
61 VIRTIO_BALLOON_S_NAMES_prefix "swap-in", \
62 VIRTIO_BALLOON_S_NAMES_prefix "swap-out", \
63 VIRTIO_BALLOON_S_NAMES_prefix "major-faults", \
64 VIRTIO_BALLOON_S_NAMES_prefix "minor-faults", \
65 VIRTIO_BALLOON_S_NAMES_prefix "free-memory", \
66 VIRTIO_BALLOON_S_NAMES_prefix "total-memory", \
67 VIRTIO_BALLOON_S_NAMES_prefix "available-memory", \
68 VIRTIO_BALLOON_S_NAMES_prefix "disk-caches", \
69 VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-allocations", \
70 VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures" \
71}
72
73#define VIRTIO_BALLOON_S_NAMES VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("")
74
60/* 75/*
61 * Memory statistics structure. 76 * Memory statistics structure.
62 * Driver fills an array of these structures and passes to device. 77 * Driver fills an array of these structures and passes to device.
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index d315b393abdd..ba03ec39efb3 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1572,13 +1572,32 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
1572 return cnt; 1572 return cnt;
1573} 1573}
1574 1574
1575static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
1576 u32 *prog_ids,
1577 u32 request_cnt)
1578{
1579 int i = 0;
1580
1581 for (; *prog; prog++) {
1582 if (*prog == &dummy_bpf_prog.prog)
1583 continue;
1584 prog_ids[i] = (*prog)->aux->id;
1585 if (++i == request_cnt) {
1586 prog++;
1587 break;
1588 }
1589 }
1590
1591 return !!(*prog);
1592}
1593
1575int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, 1594int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
1576 __u32 __user *prog_ids, u32 cnt) 1595 __u32 __user *prog_ids, u32 cnt)
1577{ 1596{
1578 struct bpf_prog **prog; 1597 struct bpf_prog **prog;
1579 unsigned long err = 0; 1598 unsigned long err = 0;
1580 u32 i = 0, *ids;
1581 bool nospc; 1599 bool nospc;
1600 u32 *ids;
1582 1601
1583 /* users of this function are doing: 1602 /* users of this function are doing:
1584 * cnt = bpf_prog_array_length(); 1603 * cnt = bpf_prog_array_length();
@@ -1595,16 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
1595 return -ENOMEM; 1614 return -ENOMEM;
1596 rcu_read_lock(); 1615 rcu_read_lock();
1597 prog = rcu_dereference(progs)->progs; 1616 prog = rcu_dereference(progs)->progs;
1598 for (; *prog; prog++) { 1617 nospc = bpf_prog_array_copy_core(prog, ids, cnt);
1599 if (*prog == &dummy_bpf_prog.prog)
1600 continue;
1601 ids[i] = (*prog)->aux->id;
1602 if (++i == cnt) {
1603 prog++;
1604 break;
1605 }
1606 }
1607 nospc = !!(*prog);
1608 rcu_read_unlock(); 1618 rcu_read_unlock();
1609 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); 1619 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1610 kfree(ids); 1620 kfree(ids);
@@ -1683,22 +1693,25 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1683} 1693}
1684 1694
1685int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, 1695int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1686 __u32 __user *prog_ids, u32 request_cnt, 1696 u32 *prog_ids, u32 request_cnt,
1687 __u32 __user *prog_cnt) 1697 u32 *prog_cnt)
1688{ 1698{
1699 struct bpf_prog **prog;
1689 u32 cnt = 0; 1700 u32 cnt = 0;
1690 1701
1691 if (array) 1702 if (array)
1692 cnt = bpf_prog_array_length(array); 1703 cnt = bpf_prog_array_length(array);
1693 1704
1694 if (copy_to_user(prog_cnt, &cnt, sizeof(cnt))) 1705 *prog_cnt = cnt;
1695 return -EFAULT;
1696 1706
1697 /* return early if user requested only program count or nothing to copy */ 1707 /* return early if user requested only program count or nothing to copy */
1698 if (!request_cnt || !cnt) 1708 if (!request_cnt || !cnt)
1699 return 0; 1709 return 0;
1700 1710
1701 return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt); 1711 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1712 prog = rcu_dereference_check(array, 1)->progs;
1713 return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
1714 : 0;
1702} 1715}
1703 1716
1704static void bpf_prog_free_deferred(struct work_struct *work) 1717static void bpf_prog_free_deferred(struct work_struct *work)
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 8dd9210d7db7..a3b21385e947 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1442,9 +1442,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
1442 attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) 1442 attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1443 return ERR_PTR(-EINVAL); 1443 return ERR_PTR(-EINVAL);
1444 1444
1445 if (attr->value_size > KMALLOC_MAX_SIZE)
1446 return ERR_PTR(-E2BIG);
1447
1448 err = bpf_tcp_ulp_register(); 1445 err = bpf_tcp_ulp_register();
1449 if (err && err != -EEXIST) 1446 if (err && err != -EEXIST)
1450 return ERR_PTR(err); 1447 return ERR_PTR(err);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 102160ff5c66..ea619021d901 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2428,7 +2428,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2428 struct kprobe_blacklist_entry *ent = 2428 struct kprobe_blacklist_entry *ent =
2429 list_entry(v, struct kprobe_blacklist_entry, list); 2429 list_entry(v, struct kprobe_blacklist_entry, list);
2430 2430
2431 seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr, 2431 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2432 (void *)ent->end_addr, (void *)ent->start_addr); 2432 (void *)ent->end_addr, (void *)ent->start_addr);
2433 return 0; 2433 return 0;
2434} 2434}
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d88e96d4e12c..56ba0f2a01db 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -977,6 +977,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
977{ 977{
978 struct perf_event_query_bpf __user *uquery = info; 978 struct perf_event_query_bpf __user *uquery = info;
979 struct perf_event_query_bpf query = {}; 979 struct perf_event_query_bpf query = {};
980 u32 *ids, prog_cnt, ids_len;
980 int ret; 981 int ret;
981 982
982 if (!capable(CAP_SYS_ADMIN)) 983 if (!capable(CAP_SYS_ADMIN))
@@ -985,16 +986,32 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
985 return -EINVAL; 986 return -EINVAL;
986 if (copy_from_user(&query, uquery, sizeof(query))) 987 if (copy_from_user(&query, uquery, sizeof(query)))
987 return -EFAULT; 988 return -EFAULT;
988 if (query.ids_len > BPF_TRACE_MAX_PROGS) 989
990 ids_len = query.ids_len;
991 if (ids_len > BPF_TRACE_MAX_PROGS)
989 return -E2BIG; 992 return -E2BIG;
993 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
994 if (!ids)
995 return -ENOMEM;
996 /*
997 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
998 * is required when user only wants to check for uquery->prog_cnt.
999 * There is no need to check for it since the case is handled
1000 * gracefully in bpf_prog_array_copy_info.
1001 */
990 1002
991 mutex_lock(&bpf_event_mutex); 1003 mutex_lock(&bpf_event_mutex);
992 ret = bpf_prog_array_copy_info(event->tp_event->prog_array, 1004 ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
993 uquery->ids, 1005 ids,
994 query.ids_len, 1006 ids_len,
995 &uquery->prog_cnt); 1007 &prog_cnt);
996 mutex_unlock(&bpf_event_mutex); 1008 mutex_unlock(&bpf_event_mutex);
997 1009
1010 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1011 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1012 ret = -EFAULT;
1013
1014 kfree(ids);
998 return ret; 1015 return ret;
999} 1016}
1000 1017
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index e954ae3d82c0..e3a658bac10f 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -356,7 +356,7 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
356 __field( unsigned int, seqnum ) 356 __field( unsigned int, seqnum )
357 ), 357 ),
358 358
359 F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n", 359 F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llu\tnmi-ts:%llu\tnmi-count:%u\n",
360 __entry->seqnum, 360 __entry->seqnum,
361 __entry->tv_sec, 361 __entry->tv_sec,
362 __entry->tv_nsec, 362 __entry->tv_nsec,
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 9b4716bb8bb0..1f951b3df60c 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1499,14 +1499,14 @@ static int process_preds(struct trace_event_call *call,
1499 return ret; 1499 return ret;
1500 } 1500 }
1501 1501
1502 if (!nr_preds) { 1502 if (!nr_preds)
1503 prog = NULL; 1503 return -EINVAL;
1504 } else { 1504
1505 prog = predicate_parse(filter_string, nr_parens, nr_preds, 1505 prog = predicate_parse(filter_string, nr_parens, nr_preds,
1506 parse_pred, call, pe); 1506 parse_pred, call, pe);
1507 if (IS_ERR(prog)) 1507 if (IS_ERR(prog))
1508 return PTR_ERR(prog); 1508 return PTR_ERR(prog);
1509 } 1509
1510 rcu_assign_pointer(filter->prog, prog); 1510 rcu_assign_pointer(filter->prog, prog);
1511 return 0; 1511 return 0;
1512} 1512}
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index c0bba30fef0a..bbfb229aa067 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -84,7 +84,8 @@ again:
84 __free_pages(page, page_order); 84 __free_pages(page, page_order);
85 page = NULL; 85 page = NULL;
86 86
87 if (dev->coherent_dma_mask < DMA_BIT_MASK(32) && 87 if (IS_ENABLED(CONFIG_ZONE_DMA) &&
88 dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
88 !(gfp & GFP_DMA)) { 89 !(gfp & GFP_DMA)) {
89 gfp = (gfp & ~GFP_DMA32) | GFP_DMA; 90 gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
90 goto again; 91 goto again;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 032e0fe45940..28a4c3490359 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1825,13 +1825,14 @@ static int compat_table_info(const struct ebt_table_info *info,
1825{ 1825{
1826 unsigned int size = info->entries_size; 1826 unsigned int size = info->entries_size;
1827 const void *entries = info->entries; 1827 const void *entries = info->entries;
1828 int ret;
1829 1828
1830 newinfo->entries_size = size; 1829 newinfo->entries_size = size;
1831 1830 if (info->nentries) {
1832 ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries); 1831 int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
1833 if (ret) 1832 info->nentries);
1834 return ret; 1833 if (ret)
1834 return ret;
1835 }
1835 1836
1836 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, 1837 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1837 entries, newinfo); 1838 entries, newinfo);
diff --git a/net/ife/ife.c b/net/ife/ife.c
index 7d1ec76e7f43..13bbf8cb6a39 100644
--- a/net/ife/ife.c
+++ b/net/ife/ife.c
@@ -69,6 +69,9 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen)
69 int total_pull; 69 int total_pull;
70 u16 ifehdrln; 70 u16 ifehdrln;
71 71
72 if (!pskb_may_pull(skb, skb->dev->hard_header_len + IFE_METAHDRLEN))
73 return NULL;
74
72 ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len); 75 ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len);
73 ifehdrln = ntohs(ifehdr->metalen); 76 ifehdrln = ntohs(ifehdr->metalen);
74 total_pull = skb->dev->hard_header_len + ifehdrln; 77 total_pull = skb->dev->hard_header_len + ifehdrln;
@@ -92,12 +95,43 @@ struct meta_tlvhdr {
92 __be16 len; 95 __be16 len;
93}; 96};
94 97
98static bool __ife_tlv_meta_valid(const unsigned char *skbdata,
99 const unsigned char *ifehdr_end)
100{
101 const struct meta_tlvhdr *tlv;
102 u16 tlvlen;
103
104 if (unlikely(skbdata + sizeof(*tlv) > ifehdr_end))
105 return false;
106
107 tlv = (const struct meta_tlvhdr *)skbdata;
108 tlvlen = ntohs(tlv->len);
109
110 /* tlv length field is inc header, check on minimum */
111 if (tlvlen < NLA_HDRLEN)
112 return false;
113
114 /* overflow by NLA_ALIGN check */
115 if (NLA_ALIGN(tlvlen) < tlvlen)
116 return false;
117
118 if (unlikely(skbdata + NLA_ALIGN(tlvlen) > ifehdr_end))
119 return false;
120
121 return true;
122}
123
95/* Caller takes care of presenting data in network order 124/* Caller takes care of presenting data in network order
96 */ 125 */
97void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen) 126void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
127 u16 *dlen, u16 *totlen)
98{ 128{
99 struct meta_tlvhdr *tlv = (struct meta_tlvhdr *) skbdata; 129 struct meta_tlvhdr *tlv;
130
131 if (!__ife_tlv_meta_valid(skbdata, ifehdr_end))
132 return NULL;
100 133
134 tlv = (struct meta_tlvhdr *)skbdata;
101 *dlen = ntohs(tlv->len) - NLA_HDRLEN; 135 *dlen = ntohs(tlv->len) - NLA_HDRLEN;
102 *attrtype = ntohs(tlv->type); 136 *attrtype = ntohs(tlv->type);
103 137
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 367def6ddeda..e51c644484dc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3868,11 +3868,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
3868 int length = (th->doff << 2) - sizeof(*th); 3868 int length = (th->doff << 2) - sizeof(*th);
3869 const u8 *ptr = (const u8 *)(th + 1); 3869 const u8 *ptr = (const u8 *)(th + 1);
3870 3870
3871 /* If the TCP option is too short, we can short cut */ 3871 /* If not enough data remaining, we can short cut */
3872 if (length < TCPOLEN_MD5SIG) 3872 while (length >= TCPOLEN_MD5SIG) {
3873 return NULL;
3874
3875 while (length > 0) {
3876 int opcode = *ptr++; 3873 int opcode = *ptr++;
3877 int opsize; 3874 int opsize;
3878 3875
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index ccbfa83e4bb0..ce77bcc2490c 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -48,6 +48,34 @@ config NFT_CHAIN_ROUTE_IPV6
48 fields such as the source, destination, flowlabel, hop-limit and 48 fields such as the source, destination, flowlabel, hop-limit and
49 the packet mark. 49 the packet mark.
50 50
51if NF_NAT_IPV6
52
53config NFT_CHAIN_NAT_IPV6
54 tristate "IPv6 nf_tables nat chain support"
55 help
56 This option enables the "nat" chain for IPv6 in nf_tables. This
57 chain type is used to perform Network Address Translation (NAT)
58 packet transformations such as the source, destination address and
59 source and destination ports.
60
61config NFT_MASQ_IPV6
62 tristate "IPv6 masquerade support for nf_tables"
63 depends on NFT_MASQ
64 select NF_NAT_MASQUERADE_IPV6
65 help
66 This is the expression that provides IPv4 masquerading support for
67 nf_tables.
68
69config NFT_REDIR_IPV6
70 tristate "IPv6 redirect support for nf_tables"
71 depends on NFT_REDIR
72 select NF_NAT_REDIRECT
73 help
74 This is the expression that provides IPv4 redirect support for
75 nf_tables.
76
77endif # NF_NAT_IPV6
78
51config NFT_REJECT_IPV6 79config NFT_REJECT_IPV6
52 select NF_REJECT_IPV6 80 select NF_REJECT_IPV6
53 default NFT_REJECT 81 default NFT_REJECT
@@ -107,39 +135,12 @@ config NF_NAT_IPV6
107 135
108if NF_NAT_IPV6 136if NF_NAT_IPV6
109 137
110config NFT_CHAIN_NAT_IPV6
111 depends on NF_TABLES_IPV6
112 tristate "IPv6 nf_tables nat chain support"
113 help
114 This option enables the "nat" chain for IPv6 in nf_tables. This
115 chain type is used to perform Network Address Translation (NAT)
116 packet transformations such as the source, destination address and
117 source and destination ports.
118
119config NF_NAT_MASQUERADE_IPV6 138config NF_NAT_MASQUERADE_IPV6
120 tristate "IPv6 masquerade support" 139 tristate "IPv6 masquerade support"
121 help 140 help
122 This is the kernel functionality to provide NAT in the masquerade 141 This is the kernel functionality to provide NAT in the masquerade
123 flavour (automatic source address selection) for IPv6. 142 flavour (automatic source address selection) for IPv6.
124 143
125config NFT_MASQ_IPV6
126 tristate "IPv6 masquerade support for nf_tables"
127 depends on NF_TABLES_IPV6
128 depends on NFT_MASQ
129 select NF_NAT_MASQUERADE_IPV6
130 help
131 This is the expression that provides IPv4 masquerading support for
132 nf_tables.
133
134config NFT_REDIR_IPV6
135 tristate "IPv6 redirect support for nf_tables"
136 depends on NF_TABLES_IPV6
137 depends on NFT_REDIR
138 select NF_NAT_REDIRECT
139 help
140 This is the expression that provides IPv4 redirect support for
141 nf_tables.
142
143endif # NF_NAT_IPV6 144endif # NF_NAT_IPV6
144 145
145config IP6_NF_IPTABLES 146config IP6_NF_IPTABLES
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 49b954d6d0fa..cde7d8251377 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3975,6 +3975,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
3975 3975
3976static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { 3976static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
3977 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) }, 3977 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
3978 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
3978 [RTA_OIF] = { .type = NLA_U32 }, 3979 [RTA_OIF] = { .type = NLA_U32 },
3979 [RTA_IIF] = { .type = NLA_U32 }, 3980 [RTA_IIF] = { .type = NLA_U32 },
3980 [RTA_PRIORITY] = { .type = NLA_U32 }, 3981 [RTA_PRIORITY] = { .type = NLA_U32 },
@@ -3986,6 +3987,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
3986 [RTA_EXPIRES] = { .type = NLA_U32 }, 3987 [RTA_EXPIRES] = { .type = NLA_U32 },
3987 [RTA_UID] = { .type = NLA_U32 }, 3988 [RTA_UID] = { .type = NLA_U32 },
3988 [RTA_MARK] = { .type = NLA_U32 }, 3989 [RTA_MARK] = { .type = NLA_U32 },
3990 [RTA_TABLE] = { .type = NLA_U32 },
3989}; 3991};
3990 3992
3991static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, 3993static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index f343e6f0fc95..5fe139484919 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -136,7 +136,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
136 isrh->nexthdr = proto; 136 isrh->nexthdr = proto;
137 137
138 hdr->daddr = isrh->segments[isrh->first_segment]; 138 hdr->daddr = isrh->segments[isrh->first_segment];
139 set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr); 139 set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
140 140
141#ifdef CONFIG_IPV6_SEG6_HMAC 141#ifdef CONFIG_IPV6_SEG6_HMAC
142 if (sr_has_hmac(isrh)) { 142 if (sr_has_hmac(isrh)) {
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index b8f9d45bfeb1..7f1e842ef05a 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -106,8 +106,11 @@ static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
106 return; 106 return;
107 107
108 /* Drop reference taken by last invocation of l2tp_dfs_next_tunnel() */ 108 /* Drop reference taken by last invocation of l2tp_dfs_next_tunnel() */
109 if (pd->tunnel) 109 if (pd->tunnel) {
110 l2tp_tunnel_dec_refcount(pd->tunnel); 110 l2tp_tunnel_dec_refcount(pd->tunnel);
111 pd->tunnel = NULL;
112 pd->session = NULL;
113 }
111} 114}
112 115
113static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) 116static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 7d0c963680e6..1fd9e145076a 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -619,6 +619,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
619 lock_sock(sk); 619 lock_sock(sk);
620 620
621 error = -EINVAL; 621 error = -EINVAL;
622
623 if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
624 sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
625 sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
626 sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
627 goto end;
628
622 if (sp->sa_protocol != PX_PROTO_OL2TP) 629 if (sp->sa_protocol != PX_PROTO_OL2TP)
623 goto end; 630 goto end;
624 631
@@ -1618,8 +1625,11 @@ static void pppol2tp_seq_stop(struct seq_file *p, void *v)
1618 return; 1625 return;
1619 1626
1620 /* Drop reference taken by last invocation of pppol2tp_next_tunnel() */ 1627 /* Drop reference taken by last invocation of pppol2tp_next_tunnel() */
1621 if (pd->tunnel) 1628 if (pd->tunnel) {
1622 l2tp_tunnel_dec_refcount(pd->tunnel); 1629 l2tp_tunnel_dec_refcount(pd->tunnel);
1630 pd->tunnel = NULL;
1631 pd->session = NULL;
1632 }
1623} 1633}
1624 1634
1625static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) 1635static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 6d29b2b94e84..cb80ebb38311 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -189,7 +189,6 @@ static int llc_ui_release(struct socket *sock)
189{ 189{
190 struct sock *sk = sock->sk; 190 struct sock *sk = sock->sk;
191 struct llc_sock *llc; 191 struct llc_sock *llc;
192 struct llc_sap *sap;
193 192
194 if (unlikely(sk == NULL)) 193 if (unlikely(sk == NULL))
195 goto out; 194 goto out;
@@ -200,15 +199,19 @@ static int llc_ui_release(struct socket *sock)
200 llc->laddr.lsap, llc->daddr.lsap); 199 llc->laddr.lsap, llc->daddr.lsap);
201 if (!llc_send_disc(sk)) 200 if (!llc_send_disc(sk))
202 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); 201 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
203 sap = llc->sap; 202 if (!sock_flag(sk, SOCK_ZAPPED)) {
204 /* Hold this for release_sock(), so that llc_backlog_rcv() could still 203 struct llc_sap *sap = llc->sap;
205 * use it. 204
206 */ 205 /* Hold this for release_sock(), so that llc_backlog_rcv()
207 llc_sap_hold(sap); 206 * could still use it.
208 if (!sock_flag(sk, SOCK_ZAPPED)) 207 */
208 llc_sap_hold(sap);
209 llc_sap_remove_socket(llc->sap, sk); 209 llc_sap_remove_socket(llc->sap, sk);
210 release_sock(sk); 210 release_sock(sk);
211 llc_sap_put(sap); 211 llc_sap_put(sap);
212 } else {
213 release_sock(sk);
214 }
212 if (llc->dev) 215 if (llc->dev)
213 dev_put(llc->dev); 216 dev_put(llc->dev);
214 sock_put(sk); 217 sock_put(sk);
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 163121192aca..4d78375f9872 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1099,14 +1099,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
1099 1099
1100int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb) 1100int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
1101{ 1101{
1102 struct llc_sock *llc = llc_sk(sk); 1102 llc_sk_stop_all_timers(sk, false);
1103
1104 del_timer(&llc->pf_cycle_timer.timer);
1105 del_timer(&llc->ack_timer.timer);
1106 del_timer(&llc->rej_sent_timer.timer);
1107 del_timer(&llc->busy_state_timer.timer);
1108 llc->ack_must_be_send = 0;
1109 llc->ack_pf = 0;
1110 return 0; 1103 return 0;
1111} 1104}
1112 1105
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 110e32bcb399..c0ac522b48a1 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -961,6 +961,26 @@ out:
961 return sk; 961 return sk;
962} 962}
963 963
964void llc_sk_stop_all_timers(struct sock *sk, bool sync)
965{
966 struct llc_sock *llc = llc_sk(sk);
967
968 if (sync) {
969 del_timer_sync(&llc->pf_cycle_timer.timer);
970 del_timer_sync(&llc->ack_timer.timer);
971 del_timer_sync(&llc->rej_sent_timer.timer);
972 del_timer_sync(&llc->busy_state_timer.timer);
973 } else {
974 del_timer(&llc->pf_cycle_timer.timer);
975 del_timer(&llc->ack_timer.timer);
976 del_timer(&llc->rej_sent_timer.timer);
977 del_timer(&llc->busy_state_timer.timer);
978 }
979
980 llc->ack_must_be_send = 0;
981 llc->ack_pf = 0;
982}
983
964/** 984/**
965 * llc_sk_free - Frees a LLC socket 985 * llc_sk_free - Frees a LLC socket
966 * @sk - socket to free 986 * @sk - socket to free
@@ -973,7 +993,7 @@ void llc_sk_free(struct sock *sk)
973 993
974 llc->state = LLC_CONN_OUT_OF_SVC; 994 llc->state = LLC_CONN_OUT_OF_SVC;
975 /* Stop all (possibly) running timers */ 995 /* Stop all (possibly) running timers */
976 llc_conn_ac_stop_all_timers(sk, NULL); 996 llc_sk_stop_all_timers(sk, true);
977#ifdef DEBUG_LLC_CONN_ALLOC 997#ifdef DEBUG_LLC_CONN_ALLOC
978 printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__, 998 printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
979 skb_queue_len(&llc->pdu_unack_q), 999 skb_queue_len(&llc->pdu_unack_q),
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 704b3832dbad..44d8a55e9721 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -594,6 +594,7 @@ config NFT_QUOTA
594config NFT_REJECT 594config NFT_REJECT
595 default m if NETFILTER_ADVANCED=n 595 default m if NETFILTER_ADVANCED=n
596 tristate "Netfilter nf_tables reject support" 596 tristate "Netfilter nf_tables reject support"
597 depends on !NF_TABLES_INET || (IPV6!=m || m)
597 help 598 help
598 This option adds the "reject" expression that you can use to 599 This option adds the "reject" expression that you can use to
599 explicitly deny and notify via TCP reset/ICMP informational errors 600 explicitly deny and notify via TCP reset/ICMP informational errors
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 5ebde4b15810..f36098887ad0 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2384,11 +2384,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2384 strlcpy(cfg.mcast_ifn, dm->mcast_ifn, 2384 strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
2385 sizeof(cfg.mcast_ifn)); 2385 sizeof(cfg.mcast_ifn));
2386 cfg.syncid = dm->syncid; 2386 cfg.syncid = dm->syncid;
2387 rtnl_lock();
2388 mutex_lock(&ipvs->sync_mutex);
2389 ret = start_sync_thread(ipvs, &cfg, dm->state); 2387 ret = start_sync_thread(ipvs, &cfg, dm->state);
2390 mutex_unlock(&ipvs->sync_mutex);
2391 rtnl_unlock();
2392 } else { 2388 } else {
2393 mutex_lock(&ipvs->sync_mutex); 2389 mutex_lock(&ipvs->sync_mutex);
2394 ret = stop_sync_thread(ipvs, dm->state); 2390 ret = stop_sync_thread(ipvs, dm->state);
@@ -3481,12 +3477,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
3481 if (ipvs->mixed_address_family_dests > 0) 3477 if (ipvs->mixed_address_family_dests > 0)
3482 return -EINVAL; 3478 return -EINVAL;
3483 3479
3484 rtnl_lock();
3485 mutex_lock(&ipvs->sync_mutex);
3486 ret = start_sync_thread(ipvs, &c, 3480 ret = start_sync_thread(ipvs, &c,
3487 nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); 3481 nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
3488 mutex_unlock(&ipvs->sync_mutex);
3489 rtnl_unlock();
3490 return ret; 3482 return ret;
3491} 3483}
3492 3484
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index fbaf3bd05b2e..001501e25625 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -49,6 +49,7 @@
49#include <linux/kthread.h> 49#include <linux/kthread.h>
50#include <linux/wait.h> 50#include <linux/wait.h>
51#include <linux/kernel.h> 51#include <linux/kernel.h>
52#include <linux/sched/signal.h>
52 53
53#include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */ 54#include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */
54 55
@@ -1360,15 +1361,9 @@ static void set_mcast_pmtudisc(struct sock *sk, int val)
1360/* 1361/*
1361 * Specifiy default interface for outgoing multicasts 1362 * Specifiy default interface for outgoing multicasts
1362 */ 1363 */
1363static int set_mcast_if(struct sock *sk, char *ifname) 1364static int set_mcast_if(struct sock *sk, struct net_device *dev)
1364{ 1365{
1365 struct net_device *dev;
1366 struct inet_sock *inet = inet_sk(sk); 1366 struct inet_sock *inet = inet_sk(sk);
1367 struct net *net = sock_net(sk);
1368
1369 dev = __dev_get_by_name(net, ifname);
1370 if (!dev)
1371 return -ENODEV;
1372 1367
1373 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) 1368 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
1374 return -EINVAL; 1369 return -EINVAL;
@@ -1396,19 +1391,14 @@ static int set_mcast_if(struct sock *sk, char *ifname)
1396 * in the in_addr structure passed in as a parameter. 1391 * in the in_addr structure passed in as a parameter.
1397 */ 1392 */
1398static int 1393static int
1399join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) 1394join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
1400{ 1395{
1401 struct net *net = sock_net(sk);
1402 struct ip_mreqn mreq; 1396 struct ip_mreqn mreq;
1403 struct net_device *dev;
1404 int ret; 1397 int ret;
1405 1398
1406 memset(&mreq, 0, sizeof(mreq)); 1399 memset(&mreq, 0, sizeof(mreq));
1407 memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr)); 1400 memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
1408 1401
1409 dev = __dev_get_by_name(net, ifname);
1410 if (!dev)
1411 return -ENODEV;
1412 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) 1402 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
1413 return -EINVAL; 1403 return -EINVAL;
1414 1404
@@ -1423,15 +1413,10 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
1423 1413
1424#ifdef CONFIG_IP_VS_IPV6 1414#ifdef CONFIG_IP_VS_IPV6
1425static int join_mcast_group6(struct sock *sk, struct in6_addr *addr, 1415static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
1426 char *ifname) 1416 struct net_device *dev)
1427{ 1417{
1428 struct net *net = sock_net(sk);
1429 struct net_device *dev;
1430 int ret; 1418 int ret;
1431 1419
1432 dev = __dev_get_by_name(net, ifname);
1433 if (!dev)
1434 return -ENODEV;
1435 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) 1420 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
1436 return -EINVAL; 1421 return -EINVAL;
1437 1422
@@ -1443,24 +1428,18 @@ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
1443} 1428}
1444#endif 1429#endif
1445 1430
1446static int bind_mcastif_addr(struct socket *sock, char *ifname) 1431static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
1447{ 1432{
1448 struct net *net = sock_net(sock->sk);
1449 struct net_device *dev;
1450 __be32 addr; 1433 __be32 addr;
1451 struct sockaddr_in sin; 1434 struct sockaddr_in sin;
1452 1435
1453 dev = __dev_get_by_name(net, ifname);
1454 if (!dev)
1455 return -ENODEV;
1456
1457 addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); 1436 addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
1458 if (!addr) 1437 if (!addr)
1459 pr_err("You probably need to specify IP address on " 1438 pr_err("You probably need to specify IP address on "
1460 "multicast interface.\n"); 1439 "multicast interface.\n");
1461 1440
1462 IP_VS_DBG(7, "binding socket with (%s) %pI4\n", 1441 IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
1463 ifname, &addr); 1442 dev->name, &addr);
1464 1443
1465 /* Now bind the socket with the address of multicast interface */ 1444 /* Now bind the socket with the address of multicast interface */
1466 sin.sin_family = AF_INET; 1445 sin.sin_family = AF_INET;
@@ -1493,7 +1472,8 @@ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
1493/* 1472/*
1494 * Set up sending multicast socket over UDP 1473 * Set up sending multicast socket over UDP
1495 */ 1474 */
1496static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id) 1475static int make_send_sock(struct netns_ipvs *ipvs, int id,
1476 struct net_device *dev, struct socket **sock_ret)
1497{ 1477{
1498 /* multicast addr */ 1478 /* multicast addr */
1499 union ipvs_sockaddr mcast_addr; 1479 union ipvs_sockaddr mcast_addr;
@@ -1505,9 +1485,10 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
1505 IPPROTO_UDP, &sock); 1485 IPPROTO_UDP, &sock);
1506 if (result < 0) { 1486 if (result < 0) {
1507 pr_err("Error during creation of socket; terminating\n"); 1487 pr_err("Error during creation of socket; terminating\n");
1508 return ERR_PTR(result); 1488 goto error;
1509 } 1489 }
1510 result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn); 1490 *sock_ret = sock;
1491 result = set_mcast_if(sock->sk, dev);
1511 if (result < 0) { 1492 if (result < 0) {
1512 pr_err("Error setting outbound mcast interface\n"); 1493 pr_err("Error setting outbound mcast interface\n");
1513 goto error; 1494 goto error;
@@ -1522,7 +1503,7 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
1522 set_sock_size(sock->sk, 1, result); 1503 set_sock_size(sock->sk, 1, result);
1523 1504
1524 if (AF_INET == ipvs->mcfg.mcast_af) 1505 if (AF_INET == ipvs->mcfg.mcast_af)
1525 result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn); 1506 result = bind_mcastif_addr(sock, dev);
1526 else 1507 else
1527 result = 0; 1508 result = 0;
1528 if (result < 0) { 1509 if (result < 0) {
@@ -1538,19 +1519,18 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
1538 goto error; 1519 goto error;
1539 } 1520 }
1540 1521
1541 return sock; 1522 return 0;
1542 1523
1543error: 1524error:
1544 sock_release(sock); 1525 return result;
1545 return ERR_PTR(result);
1546} 1526}
1547 1527
1548 1528
1549/* 1529/*
1550 * Set up receiving multicast socket over UDP 1530 * Set up receiving multicast socket over UDP
1551 */ 1531 */
1552static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id, 1532static int make_receive_sock(struct netns_ipvs *ipvs, int id,
1553 int ifindex) 1533 struct net_device *dev, struct socket **sock_ret)
1554{ 1534{
1555 /* multicast addr */ 1535 /* multicast addr */
1556 union ipvs_sockaddr mcast_addr; 1536 union ipvs_sockaddr mcast_addr;
@@ -1562,8 +1542,9 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
1562 IPPROTO_UDP, &sock); 1542 IPPROTO_UDP, &sock);
1563 if (result < 0) { 1543 if (result < 0) {
1564 pr_err("Error during creation of socket; terminating\n"); 1544 pr_err("Error during creation of socket; terminating\n");
1565 return ERR_PTR(result); 1545 goto error;
1566 } 1546 }
1547 *sock_ret = sock;
1567 /* it is equivalent to the REUSEADDR option in user-space */ 1548 /* it is equivalent to the REUSEADDR option in user-space */
1568 sock->sk->sk_reuse = SK_CAN_REUSE; 1549 sock->sk->sk_reuse = SK_CAN_REUSE;
1569 result = sysctl_sync_sock_size(ipvs); 1550 result = sysctl_sync_sock_size(ipvs);
@@ -1571,7 +1552,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
1571 set_sock_size(sock->sk, 0, result); 1552 set_sock_size(sock->sk, 0, result);
1572 1553
1573 get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id); 1554 get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
1574 sock->sk->sk_bound_dev_if = ifindex; 1555 sock->sk->sk_bound_dev_if = dev->ifindex;
1575 result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen); 1556 result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
1576 if (result < 0) { 1557 if (result < 0) {
1577 pr_err("Error binding to the multicast addr\n"); 1558 pr_err("Error binding to the multicast addr\n");
@@ -1582,21 +1563,20 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
1582#ifdef CONFIG_IP_VS_IPV6 1563#ifdef CONFIG_IP_VS_IPV6
1583 if (ipvs->bcfg.mcast_af == AF_INET6) 1564 if (ipvs->bcfg.mcast_af == AF_INET6)
1584 result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr, 1565 result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
1585 ipvs->bcfg.mcast_ifn); 1566 dev);
1586 else 1567 else
1587#endif 1568#endif
1588 result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr, 1569 result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
1589 ipvs->bcfg.mcast_ifn); 1570 dev);
1590 if (result < 0) { 1571 if (result < 0) {
1591 pr_err("Error joining to the multicast group\n"); 1572 pr_err("Error joining to the multicast group\n");
1592 goto error; 1573 goto error;
1593 } 1574 }
1594 1575
1595 return sock; 1576 return 0;
1596 1577
1597error: 1578error:
1598 sock_release(sock); 1579 return result;
1599 return ERR_PTR(result);
1600} 1580}
1601 1581
1602 1582
@@ -1778,13 +1758,12 @@ static int sync_thread_backup(void *data)
1778int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, 1758int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1779 int state) 1759 int state)
1780{ 1760{
1781 struct ip_vs_sync_thread_data *tinfo; 1761 struct ip_vs_sync_thread_data *tinfo = NULL;
1782 struct task_struct **array = NULL, *task; 1762 struct task_struct **array = NULL, *task;
1783 struct socket *sock;
1784 struct net_device *dev; 1763 struct net_device *dev;
1785 char *name; 1764 char *name;
1786 int (*threadfn)(void *data); 1765 int (*threadfn)(void *data);
1787 int id, count, hlen; 1766 int id = 0, count, hlen;
1788 int result = -ENOMEM; 1767 int result = -ENOMEM;
1789 u16 mtu, min_mtu; 1768 u16 mtu, min_mtu;
1790 1769
@@ -1792,6 +1771,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1792 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n", 1771 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
1793 sizeof(struct ip_vs_sync_conn_v0)); 1772 sizeof(struct ip_vs_sync_conn_v0));
1794 1773
1774 /* Do not hold one mutex and then to block on another */
1775 for (;;) {
1776 rtnl_lock();
1777 if (mutex_trylock(&ipvs->sync_mutex))
1778 break;
1779 rtnl_unlock();
1780 mutex_lock(&ipvs->sync_mutex);
1781 if (rtnl_trylock())
1782 break;
1783 mutex_unlock(&ipvs->sync_mutex);
1784 }
1785
1795 if (!ipvs->sync_state) { 1786 if (!ipvs->sync_state) {
1796 count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX); 1787 count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
1797 ipvs->threads_mask = count - 1; 1788 ipvs->threads_mask = count - 1;
@@ -1810,7 +1801,8 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1810 dev = __dev_get_by_name(ipvs->net, c->mcast_ifn); 1801 dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
1811 if (!dev) { 1802 if (!dev) {
1812 pr_err("Unknown mcast interface: %s\n", c->mcast_ifn); 1803 pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
1813 return -ENODEV; 1804 result = -ENODEV;
1805 goto out_early;
1814 } 1806 }
1815 hlen = (AF_INET6 == c->mcast_af) ? 1807 hlen = (AF_INET6 == c->mcast_af) ?
1816 sizeof(struct ipv6hdr) + sizeof(struct udphdr) : 1808 sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
@@ -1827,26 +1819,30 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1827 c->sync_maxlen = mtu - hlen; 1819 c->sync_maxlen = mtu - hlen;
1828 1820
1829 if (state == IP_VS_STATE_MASTER) { 1821 if (state == IP_VS_STATE_MASTER) {
1822 result = -EEXIST;
1830 if (ipvs->ms) 1823 if (ipvs->ms)
1831 return -EEXIST; 1824 goto out_early;
1832 1825
1833 ipvs->mcfg = *c; 1826 ipvs->mcfg = *c;
1834 name = "ipvs-m:%d:%d"; 1827 name = "ipvs-m:%d:%d";
1835 threadfn = sync_thread_master; 1828 threadfn = sync_thread_master;
1836 } else if (state == IP_VS_STATE_BACKUP) { 1829 } else if (state == IP_VS_STATE_BACKUP) {
1830 result = -EEXIST;
1837 if (ipvs->backup_threads) 1831 if (ipvs->backup_threads)
1838 return -EEXIST; 1832 goto out_early;
1839 1833
1840 ipvs->bcfg = *c; 1834 ipvs->bcfg = *c;
1841 name = "ipvs-b:%d:%d"; 1835 name = "ipvs-b:%d:%d";
1842 threadfn = sync_thread_backup; 1836 threadfn = sync_thread_backup;
1843 } else { 1837 } else {
1844 return -EINVAL; 1838 result = -EINVAL;
1839 goto out_early;
1845 } 1840 }
1846 1841
1847 if (state == IP_VS_STATE_MASTER) { 1842 if (state == IP_VS_STATE_MASTER) {
1848 struct ipvs_master_sync_state *ms; 1843 struct ipvs_master_sync_state *ms;
1849 1844
1845 result = -ENOMEM;
1850 ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL); 1846 ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL);
1851 if (!ipvs->ms) 1847 if (!ipvs->ms)
1852 goto out; 1848 goto out;
@@ -1862,39 +1858,38 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1862 } else { 1858 } else {
1863 array = kcalloc(count, sizeof(struct task_struct *), 1859 array = kcalloc(count, sizeof(struct task_struct *),
1864 GFP_KERNEL); 1860 GFP_KERNEL);
1861 result = -ENOMEM;
1865 if (!array) 1862 if (!array)
1866 goto out; 1863 goto out;
1867 } 1864 }
1868 1865
1869 tinfo = NULL;
1870 for (id = 0; id < count; id++) { 1866 for (id = 0; id < count; id++) {
1871 if (state == IP_VS_STATE_MASTER) 1867 result = -ENOMEM;
1872 sock = make_send_sock(ipvs, id);
1873 else
1874 sock = make_receive_sock(ipvs, id, dev->ifindex);
1875 if (IS_ERR(sock)) {
1876 result = PTR_ERR(sock);
1877 goto outtinfo;
1878 }
1879 tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); 1868 tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
1880 if (!tinfo) 1869 if (!tinfo)
1881 goto outsocket; 1870 goto out;
1882 tinfo->ipvs = ipvs; 1871 tinfo->ipvs = ipvs;
1883 tinfo->sock = sock; 1872 tinfo->sock = NULL;
1884 if (state == IP_VS_STATE_BACKUP) { 1873 if (state == IP_VS_STATE_BACKUP) {
1885 tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen, 1874 tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
1886 GFP_KERNEL); 1875 GFP_KERNEL);
1887 if (!tinfo->buf) 1876 if (!tinfo->buf)
1888 goto outtinfo; 1877 goto out;
1889 } else { 1878 } else {
1890 tinfo->buf = NULL; 1879 tinfo->buf = NULL;
1891 } 1880 }
1892 tinfo->id = id; 1881 tinfo->id = id;
1882 if (state == IP_VS_STATE_MASTER)
1883 result = make_send_sock(ipvs, id, dev, &tinfo->sock);
1884 else
1885 result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
1886 if (result < 0)
1887 goto out;
1893 1888
1894 task = kthread_run(threadfn, tinfo, name, ipvs->gen, id); 1889 task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
1895 if (IS_ERR(task)) { 1890 if (IS_ERR(task)) {
1896 result = PTR_ERR(task); 1891 result = PTR_ERR(task);
1897 goto outtinfo; 1892 goto out;
1898 } 1893 }
1899 tinfo = NULL; 1894 tinfo = NULL;
1900 if (state == IP_VS_STATE_MASTER) 1895 if (state == IP_VS_STATE_MASTER)
@@ -1911,20 +1906,20 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1911 ipvs->sync_state |= state; 1906 ipvs->sync_state |= state;
1912 spin_unlock_bh(&ipvs->sync_buff_lock); 1907 spin_unlock_bh(&ipvs->sync_buff_lock);
1913 1908
1909 mutex_unlock(&ipvs->sync_mutex);
1910 rtnl_unlock();
1911
1914 /* increase the module use count */ 1912 /* increase the module use count */
1915 ip_vs_use_count_inc(); 1913 ip_vs_use_count_inc();
1916 1914
1917 return 0; 1915 return 0;
1918 1916
1919outsocket: 1917out:
1920 sock_release(sock); 1918 /* We do not need RTNL lock anymore, release it here so that
1921 1919 * sock_release below and in the kthreads can use rtnl_lock
1922outtinfo: 1920 * to leave the mcast group.
1923 if (tinfo) { 1921 */
1924 sock_release(tinfo->sock); 1922 rtnl_unlock();
1925 kfree(tinfo->buf);
1926 kfree(tinfo);
1927 }
1928 count = id; 1923 count = id;
1929 while (count-- > 0) { 1924 while (count-- > 0) {
1930 if (state == IP_VS_STATE_MASTER) 1925 if (state == IP_VS_STATE_MASTER)
@@ -1932,13 +1927,23 @@ outtinfo:
1932 else 1927 else
1933 kthread_stop(array[count]); 1928 kthread_stop(array[count]);
1934 } 1929 }
1935 kfree(array);
1936
1937out:
1938 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { 1930 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
1939 kfree(ipvs->ms); 1931 kfree(ipvs->ms);
1940 ipvs->ms = NULL; 1932 ipvs->ms = NULL;
1941 } 1933 }
1934 mutex_unlock(&ipvs->sync_mutex);
1935 if (tinfo) {
1936 if (tinfo->sock)
1937 sock_release(tinfo->sock);
1938 kfree(tinfo->buf);
1939 kfree(tinfo);
1940 }
1941 kfree(array);
1942 return result;
1943
1944out_early:
1945 mutex_unlock(&ipvs->sync_mutex);
1946 rtnl_unlock();
1942 return result; 1947 return result;
1943} 1948}
1944 1949
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 8ef21d9f9a00..4b2b3d53acfc 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -252,7 +252,7 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
252static inline int expect_matches(const struct nf_conntrack_expect *a, 252static inline int expect_matches(const struct nf_conntrack_expect *a,
253 const struct nf_conntrack_expect *b) 253 const struct nf_conntrack_expect *b)
254{ 254{
255 return a->master == b->master && a->class == b->class && 255 return a->master == b->master &&
256 nf_ct_tuple_equal(&a->tuple, &b->tuple) && 256 nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
257 nf_ct_tuple_mask_equal(&a->mask, &b->mask) && 257 nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
258 net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) && 258 net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
@@ -421,6 +421,9 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
421 h = nf_ct_expect_dst_hash(net, &expect->tuple); 421 h = nf_ct_expect_dst_hash(net, &expect->tuple);
422 hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) { 422 hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
423 if (expect_matches(i, expect)) { 423 if (expect_matches(i, expect)) {
424 if (i->class != expect->class)
425 return -EALREADY;
426
424 if (nf_ct_remove_expect(i)) 427 if (nf_ct_remove_expect(i))
425 break; 428 break;
426 } else if (expect_clash(i, expect)) { 429 } else if (expect_clash(i, expect)) {
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 9fe0ddc333fb..277bbfe26478 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -9,6 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/kmemleak.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/mutex.h> 14#include <linux/mutex.h>
14#include <linux/rcupdate.h> 15#include <linux/rcupdate.h>
@@ -71,6 +72,7 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
71 rcu_read_unlock(); 72 rcu_read_unlock();
72 73
73 alloc = max(newlen, NF_CT_EXT_PREALLOC); 74 alloc = max(newlen, NF_CT_EXT_PREALLOC);
75 kmemleak_not_leak(old);
74 new = __krealloc(old, alloc, gfp); 76 new = __krealloc(old, alloc, gfp);
75 if (!new) 77 if (!new)
76 return NULL; 78 return NULL;
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 4dbb5bad4363..908e51e2dc2b 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -938,11 +938,19 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
938 datalen, rtp_exp, rtcp_exp, 938 datalen, rtp_exp, rtcp_exp,
939 mediaoff, medialen, daddr); 939 mediaoff, medialen, daddr);
940 else { 940 else {
941 if (nf_ct_expect_related(rtp_exp) == 0) { 941 /* -EALREADY handling works around end-points that send
942 if (nf_ct_expect_related(rtcp_exp) != 0) 942 * SDP messages with identical port but different media type,
943 nf_ct_unexpect_related(rtp_exp); 943 * we pretend expectation was set up.
944 else 944 */
945 int errp = nf_ct_expect_related(rtp_exp);
946
947 if (errp == 0 || errp == -EALREADY) {
948 int errcp = nf_ct_expect_related(rtcp_exp);
949
950 if (errcp == 0 || errcp == -EALREADY)
945 ret = NF_ACCEPT; 951 ret = NF_ACCEPT;
952 else if (errp == 0)
953 nf_ct_unexpect_related(rtp_exp);
946 } 954 }
947 } 955 }
948 nf_ct_expect_put(rtcp_exp); 956 nf_ct_expect_put(rtcp_exp);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 9134cc429ad4..04d4e3772584 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2361,41 +2361,46 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2361 } 2361 }
2362 2362
2363 if (nlh->nlmsg_flags & NLM_F_REPLACE) { 2363 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
2364 if (nft_is_active_next(net, old_rule)) { 2364 if (!nft_is_active_next(net, old_rule)) {
2365 trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
2366 old_rule);
2367 if (trans == NULL) {
2368 err = -ENOMEM;
2369 goto err2;
2370 }
2371 nft_deactivate_next(net, old_rule);
2372 chain->use--;
2373 list_add_tail_rcu(&rule->list, &old_rule->list);
2374 } else {
2375 err = -ENOENT; 2365 err = -ENOENT;
2376 goto err2; 2366 goto err2;
2377 } 2367 }
2378 } else if (nlh->nlmsg_flags & NLM_F_APPEND) 2368 trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
2379 if (old_rule) 2369 old_rule);
2380 list_add_rcu(&rule->list, &old_rule->list); 2370 if (trans == NULL) {
2381 else 2371 err = -ENOMEM;
2382 list_add_tail_rcu(&rule->list, &chain->rules); 2372 goto err2;
2383 else { 2373 }
2384 if (old_rule) 2374 nft_deactivate_next(net, old_rule);
2385 list_add_tail_rcu(&rule->list, &old_rule->list); 2375 chain->use--;
2386 else
2387 list_add_rcu(&rule->list, &chain->rules);
2388 }
2389 2376
2390 if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) { 2377 if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
2391 err = -ENOMEM; 2378 err = -ENOMEM;
2392 goto err3; 2379 goto err2;
2380 }
2381
2382 list_add_tail_rcu(&rule->list, &old_rule->list);
2383 } else {
2384 if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
2385 err = -ENOMEM;
2386 goto err2;
2387 }
2388
2389 if (nlh->nlmsg_flags & NLM_F_APPEND) {
2390 if (old_rule)
2391 list_add_rcu(&rule->list, &old_rule->list);
2392 else
2393 list_add_tail_rcu(&rule->list, &chain->rules);
2394 } else {
2395 if (old_rule)
2396 list_add_tail_rcu(&rule->list, &old_rule->list);
2397 else
2398 list_add_rcu(&rule->list, &chain->rules);
2399 }
2393 } 2400 }
2394 chain->use++; 2401 chain->use++;
2395 return 0; 2402 return 0;
2396 2403
2397err3:
2398 list_del_rcu(&rule->list);
2399err2: 2404err2:
2400 nf_tables_rule_destroy(&ctx, rule); 2405 nf_tables_rule_destroy(&ctx, rule);
2401err1: 2406err1:
@@ -3207,18 +3212,20 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
3207 3212
3208 err = ops->init(set, &desc, nla); 3213 err = ops->init(set, &desc, nla);
3209 if (err < 0) 3214 if (err < 0)
3210 goto err2; 3215 goto err3;
3211 3216
3212 err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); 3217 err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
3213 if (err < 0) 3218 if (err < 0)
3214 goto err3; 3219 goto err4;
3215 3220
3216 list_add_tail_rcu(&set->list, &table->sets); 3221 list_add_tail_rcu(&set->list, &table->sets);
3217 table->use++; 3222 table->use++;
3218 return 0; 3223 return 0;
3219 3224
3220err3: 3225err4:
3221 ops->destroy(set); 3226 ops->destroy(set);
3227err3:
3228 kfree(set->name);
3222err2: 3229err2:
3223 kvfree(set); 3230 kvfree(set);
3224err1: 3231err1:
@@ -5738,7 +5745,7 @@ static void nft_chain_commit_update(struct nft_trans *trans)
5738 struct nft_base_chain *basechain; 5745 struct nft_base_chain *basechain;
5739 5746
5740 if (nft_trans_chain_name(trans)) 5747 if (nft_trans_chain_name(trans))
5741 strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans)); 5748 swap(trans->ctx.chain->name, nft_trans_chain_name(trans));
5742 5749
5743 if (!nft_is_base_chain(trans->ctx.chain)) 5750 if (!nft_is_base_chain(trans->ctx.chain))
5744 return; 5751 return;
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index 773da82190dc..94df000abb92 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -36,11 +36,10 @@ MODULE_ALIAS("ipt_connmark");
36MODULE_ALIAS("ip6t_connmark"); 36MODULE_ALIAS("ip6t_connmark");
37 37
38static unsigned int 38static unsigned int
39connmark_tg_shift(struct sk_buff *skb, 39connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
40 const struct xt_connmark_tginfo1 *info,
41 u8 shift_bits, u8 shift_dir)
42{ 40{
43 enum ip_conntrack_info ctinfo; 41 enum ip_conntrack_info ctinfo;
42 u_int32_t new_targetmark;
44 struct nf_conn *ct; 43 struct nf_conn *ct;
45 u_int32_t newmark; 44 u_int32_t newmark;
46 45
@@ -51,34 +50,39 @@ connmark_tg_shift(struct sk_buff *skb,
51 switch (info->mode) { 50 switch (info->mode) {
52 case XT_CONNMARK_SET: 51 case XT_CONNMARK_SET:
53 newmark = (ct->mark & ~info->ctmask) ^ info->ctmark; 52 newmark = (ct->mark & ~info->ctmask) ^ info->ctmark;
54 if (shift_dir == D_SHIFT_RIGHT) 53 if (info->shift_dir == D_SHIFT_RIGHT)
55 newmark >>= shift_bits; 54 newmark >>= info->shift_bits;
56 else 55 else
57 newmark <<= shift_bits; 56 newmark <<= info->shift_bits;
57
58 if (ct->mark != newmark) { 58 if (ct->mark != newmark) {
59 ct->mark = newmark; 59 ct->mark = newmark;
60 nf_conntrack_event_cache(IPCT_MARK, ct); 60 nf_conntrack_event_cache(IPCT_MARK, ct);
61 } 61 }
62 break; 62 break;
63 case XT_CONNMARK_SAVE: 63 case XT_CONNMARK_SAVE:
64 newmark = (ct->mark & ~info->ctmask) ^ 64 new_targetmark = (skb->mark & info->nfmask);
65 (skb->mark & info->nfmask); 65 if (info->shift_dir == D_SHIFT_RIGHT)
66 if (shift_dir == D_SHIFT_RIGHT) 66 new_targetmark >>= info->shift_bits;
67 newmark >>= shift_bits;
68 else 67 else
69 newmark <<= shift_bits; 68 new_targetmark <<= info->shift_bits;
69
70 newmark = (ct->mark & ~info->ctmask) ^
71 new_targetmark;
70 if (ct->mark != newmark) { 72 if (ct->mark != newmark) {
71 ct->mark = newmark; 73 ct->mark = newmark;
72 nf_conntrack_event_cache(IPCT_MARK, ct); 74 nf_conntrack_event_cache(IPCT_MARK, ct);
73 } 75 }
74 break; 76 break;
75 case XT_CONNMARK_RESTORE: 77 case XT_CONNMARK_RESTORE:
76 newmark = (skb->mark & ~info->nfmask) ^ 78 new_targetmark = (ct->mark & info->ctmask);
77 (ct->mark & info->ctmask); 79 if (info->shift_dir == D_SHIFT_RIGHT)
78 if (shift_dir == D_SHIFT_RIGHT) 80 new_targetmark >>= info->shift_bits;
79 newmark >>= shift_bits;
80 else 81 else
81 newmark <<= shift_bits; 82 new_targetmark <<= info->shift_bits;
83
84 newmark = (skb->mark & ~info->nfmask) ^
85 new_targetmark;
82 skb->mark = newmark; 86 skb->mark = newmark;
83 break; 87 break;
84 } 88 }
@@ -89,8 +93,14 @@ static unsigned int
89connmark_tg(struct sk_buff *skb, const struct xt_action_param *par) 93connmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
90{ 94{
91 const struct xt_connmark_tginfo1 *info = par->targinfo; 95 const struct xt_connmark_tginfo1 *info = par->targinfo;
92 96 const struct xt_connmark_tginfo2 info2 = {
93 return connmark_tg_shift(skb, info, 0, 0); 97 .ctmark = info->ctmark,
98 .ctmask = info->ctmask,
99 .nfmask = info->nfmask,
100 .mode = info->mode,
101 };
102
103 return connmark_tg_shift(skb, &info2);
94} 104}
95 105
96static unsigned int 106static unsigned int
@@ -98,8 +108,7 @@ connmark_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
98{ 108{
99 const struct xt_connmark_tginfo2 *info = par->targinfo; 109 const struct xt_connmark_tginfo2 *info = par->targinfo;
100 110
101 return connmark_tg_shift(skb, (const struct xt_connmark_tginfo1 *)info, 111 return connmark_tg_shift(skb, info);
102 info->shift_bits, info->shift_dir);
103} 112}
104 113
105static int connmark_tg_check(const struct xt_tgchk_param *par) 114static int connmark_tg_check(const struct xt_tgchk_param *par)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c31b0687396a..01f3515cada0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -329,11 +329,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
329 skb_set_queue_mapping(skb, queue_index); 329 skb_set_queue_mapping(skb, queue_index);
330} 330}
331 331
332/* register_prot_hook must be invoked with the po->bind_lock held, 332/* __register_prot_hook must be invoked through register_prot_hook
333 * or from a context in which asynchronous accesses to the packet 333 * or from a context in which asynchronous accesses to the packet
334 * socket is not possible (packet_create()). 334 * socket is not possible (packet_create()).
335 */ 335 */
336static void register_prot_hook(struct sock *sk) 336static void __register_prot_hook(struct sock *sk)
337{ 337{
338 struct packet_sock *po = pkt_sk(sk); 338 struct packet_sock *po = pkt_sk(sk);
339 339
@@ -348,8 +348,13 @@ static void register_prot_hook(struct sock *sk)
348 } 348 }
349} 349}
350 350
351/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock 351static void register_prot_hook(struct sock *sk)
352 * held. If the sync parameter is true, we will temporarily drop 352{
353 lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
354 __register_prot_hook(sk);
355}
356
357/* If the sync parameter is true, we will temporarily drop
353 * the po->bind_lock and do a synchronize_net to make sure no 358 * the po->bind_lock and do a synchronize_net to make sure no
354 * asynchronous packet processing paths still refer to the elements 359 * asynchronous packet processing paths still refer to the elements
355 * of po->prot_hook. If the sync parameter is false, it is the 360 * of po->prot_hook. If the sync parameter is false, it is the
@@ -359,6 +364,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
359{ 364{
360 struct packet_sock *po = pkt_sk(sk); 365 struct packet_sock *po = pkt_sk(sk);
361 366
367 lockdep_assert_held_once(&po->bind_lock);
368
362 po->running = 0; 369 po->running = 0;
363 370
364 if (po->fanout) 371 if (po->fanout)
@@ -3252,7 +3259,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
3252 3259
3253 if (proto) { 3260 if (proto) {
3254 po->prot_hook.type = proto; 3261 po->prot_hook.type = proto;
3255 register_prot_hook(sk); 3262 __register_prot_hook(sk);
3256 } 3263 }
3257 3264
3258 mutex_lock(&net->packet.sklist_lock); 3265 mutex_lock(&net->packet.sklist_lock);
@@ -3732,12 +3739,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3732 3739
3733 if (optlen != sizeof(val)) 3740 if (optlen != sizeof(val))
3734 return -EINVAL; 3741 return -EINVAL;
3735 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3736 return -EBUSY;
3737 if (copy_from_user(&val, optval, sizeof(val))) 3742 if (copy_from_user(&val, optval, sizeof(val)))
3738 return -EFAULT; 3743 return -EFAULT;
3739 po->tp_loss = !!val; 3744
3740 return 0; 3745 lock_sock(sk);
3746 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3747 ret = -EBUSY;
3748 } else {
3749 po->tp_loss = !!val;
3750 ret = 0;
3751 }
3752 release_sock(sk);
3753 return ret;
3741 } 3754 }
3742 case PACKET_AUXDATA: 3755 case PACKET_AUXDATA:
3743 { 3756 {
@@ -3748,7 +3761,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3748 if (copy_from_user(&val, optval, sizeof(val))) 3761 if (copy_from_user(&val, optval, sizeof(val)))
3749 return -EFAULT; 3762 return -EFAULT;
3750 3763
3764 lock_sock(sk);
3751 po->auxdata = !!val; 3765 po->auxdata = !!val;
3766 release_sock(sk);
3752 return 0; 3767 return 0;
3753 } 3768 }
3754 case PACKET_ORIGDEV: 3769 case PACKET_ORIGDEV:
@@ -3760,7 +3775,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3760 if (copy_from_user(&val, optval, sizeof(val))) 3775 if (copy_from_user(&val, optval, sizeof(val)))
3761 return -EFAULT; 3776 return -EFAULT;
3762 3777
3778 lock_sock(sk);
3763 po->origdev = !!val; 3779 po->origdev = !!val;
3780 release_sock(sk);
3764 return 0; 3781 return 0;
3765 } 3782 }
3766 case PACKET_VNET_HDR: 3783 case PACKET_VNET_HDR:
@@ -3769,15 +3786,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3769 3786
3770 if (sock->type != SOCK_RAW) 3787 if (sock->type != SOCK_RAW)
3771 return -EINVAL; 3788 return -EINVAL;
3772 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3773 return -EBUSY;
3774 if (optlen < sizeof(val)) 3789 if (optlen < sizeof(val))
3775 return -EINVAL; 3790 return -EINVAL;
3776 if (copy_from_user(&val, optval, sizeof(val))) 3791 if (copy_from_user(&val, optval, sizeof(val)))
3777 return -EFAULT; 3792 return -EFAULT;
3778 3793
3779 po->has_vnet_hdr = !!val; 3794 lock_sock(sk);
3780 return 0; 3795 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3796 ret = -EBUSY;
3797 } else {
3798 po->has_vnet_hdr = !!val;
3799 ret = 0;
3800 }
3801 release_sock(sk);
3802 return ret;
3781 } 3803 }
3782 case PACKET_TIMESTAMP: 3804 case PACKET_TIMESTAMP:
3783 { 3805 {
@@ -3815,11 +3837,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3815 3837
3816 if (optlen != sizeof(val)) 3838 if (optlen != sizeof(val))
3817 return -EINVAL; 3839 return -EINVAL;
3818 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3819 return -EBUSY;
3820 if (copy_from_user(&val, optval, sizeof(val))) 3840 if (copy_from_user(&val, optval, sizeof(val)))
3821 return -EFAULT; 3841 return -EFAULT;
3822 po->tp_tx_has_off = !!val; 3842
3843 lock_sock(sk);
3844 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3845 ret = -EBUSY;
3846 } else {
3847 po->tp_tx_has_off = !!val;
3848 ret = 0;
3849 }
3850 release_sock(sk);
3823 return 0; 3851 return 0;
3824 } 3852 }
3825 case PACKET_QDISC_BYPASS: 3853 case PACKET_QDISC_BYPASS:
diff --git a/net/packet/internal.h b/net/packet/internal.h
index a1d2b2319ae9..3bb7c5fb3bff 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -112,10 +112,12 @@ struct packet_sock {
112 int copy_thresh; 112 int copy_thresh;
113 spinlock_t bind_lock; 113 spinlock_t bind_lock;
114 struct mutex pg_vec_lock; 114 struct mutex pg_vec_lock;
115 unsigned int running:1, /* prot_hook is attached*/ 115 unsigned int running; /* bind_lock must be held */
116 auxdata:1, 116 unsigned int auxdata:1, /* writer must hold sock lock */
117 origdev:1, 117 origdev:1,
118 has_vnet_hdr:1; 118 has_vnet_hdr:1,
119 tp_loss:1,
120 tp_tx_has_off:1;
119 int pressure; 121 int pressure;
120 int ifindex; /* bound device */ 122 int ifindex; /* bound device */
121 __be16 num; 123 __be16 num;
@@ -125,8 +127,6 @@ struct packet_sock {
125 enum tpacket_versions tp_version; 127 enum tpacket_versions tp_version;
126 unsigned int tp_hdrlen; 128 unsigned int tp_hdrlen;
127 unsigned int tp_reserve; 129 unsigned int tp_reserve;
128 unsigned int tp_loss:1;
129 unsigned int tp_tx_has_off:1;
130 unsigned int tp_tstamp; 130 unsigned int tp_tstamp;
131 struct net_device __rcu *cached_dev; 131 struct net_device __rcu *cached_dev;
132 int (*xmit)(struct sk_buff *skb); 132 int (*xmit)(struct sk_buff *skb);
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index a5994cf0512b..8527cfdc446d 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -652,7 +652,7 @@ static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
652 } 652 }
653 } 653 }
654 654
655 return 0; 655 return -ENOENT;
656} 656}
657 657
658static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a, 658static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
@@ -682,7 +682,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
682 u16 mtype; 682 u16 mtype;
683 u16 dlen; 683 u16 dlen;
684 684
685 curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL); 685 curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype,
686 &dlen, NULL);
687 if (!curr_data) {
688 qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
689 return TC_ACT_SHOT;
690 }
686 691
687 if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) { 692 if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
688 /* abuse overlimits to count when we receive metadata 693 /* abuse overlimits to count when we receive metadata
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 805b139756db..092bebc70048 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -67,7 +67,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
67 67
68static void strp_start_timer(struct strparser *strp, long timeo) 68static void strp_start_timer(struct strparser *strp, long timeo)
69{ 69{
70 if (timeo) 70 if (timeo && timeo != LONG_MAX)
71 mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo); 71 mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo);
72} 72}
73 73
diff --git a/security/commoncap.c b/security/commoncap.c
index 48620c93d697..1ce701fcb3f3 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -449,6 +449,8 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
449 magic |= VFS_CAP_FLAGS_EFFECTIVE; 449 magic |= VFS_CAP_FLAGS_EFFECTIVE;
450 memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32); 450 memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
451 cap->magic_etc = cpu_to_le32(magic); 451 cap->magic_etc = cpu_to_le32(magic);
452 } else {
453 size = -ENOMEM;
452 } 454 }
453 } 455 }
454 kfree(tmpbuf); 456 kfree(tmpbuf);
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 9cf83f895d98..5e1ab2f0eb79 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -12,3 +12,6 @@ test_tcpbpf_user
12test_verifier_log 12test_verifier_log
13feature 13feature
14test_libbpf_open 14test_libbpf_open
15test_sock
16test_sock_addr
17urandom_read
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index 73bb20cfb9b7..f4d99fabc56d 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -13,6 +13,7 @@
13#include <bpf/bpf.h> 13#include <bpf/bpf.h>
14 14
15#include "cgroup_helpers.h" 15#include "cgroup_helpers.h"
16#include "bpf_rlimit.h"
16 17
17#ifndef ARRAY_SIZE 18#ifndef ARRAY_SIZE
18# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 19# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index d488f20926e8..2950f80ba7fb 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -15,6 +15,7 @@
15#include <bpf/libbpf.h> 15#include <bpf/libbpf.h>
16 16
17#include "cgroup_helpers.h" 17#include "cgroup_helpers.h"
18#include "bpf_rlimit.h"
18 19
19#define CG_PATH "/foo" 20#define CG_PATH "/foo"
20#define CONNECT4_PROG_PATH "./connect4_prog.o" 21#define CONNECT4_PROG_PATH "./connect4_prog.o"
diff --git a/tools/testing/selftests/bpf/test_sock_addr.sh b/tools/testing/selftests/bpf/test_sock_addr.sh
index c6e1dcf992c4..9832a875a828 100755
--- a/tools/testing/selftests/bpf/test_sock_addr.sh
+++ b/tools/testing/selftests/bpf/test_sock_addr.sh
@@ -4,7 +4,7 @@ set -eu
4 4
5ping_once() 5ping_once()
6{ 6{
7 ping -q -c 1 -W 1 ${1%%/*} >/dev/null 2>&1 7 ping -${1} -q -c 1 -W 1 ${2%%/*} >/dev/null 2>&1
8} 8}
9 9
10wait_for_ip() 10wait_for_ip()
@@ -13,7 +13,7 @@ wait_for_ip()
13 echo -n "Wait for testing IPv4/IPv6 to become available " 13 echo -n "Wait for testing IPv4/IPv6 to become available "
14 for _i in $(seq ${MAX_PING_TRIES}); do 14 for _i in $(seq ${MAX_PING_TRIES}); do
15 echo -n "." 15 echo -n "."
16 if ping_once ${TEST_IPv4} && ping_once ${TEST_IPv6}; then 16 if ping_once 4 ${TEST_IPv4} && ping_once 6 ${TEST_IPv6}; then
17 echo " OK" 17 echo " OK"
18 return 18 return
19 fi 19 fi
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
index 786dce7e48be..2aabab363cfb 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
@@ -29,7 +29,7 @@ do_reset
29 29
30echo "Test extended error support" 30echo "Test extended error support"
31echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' > events/sched/sched_wakeup/trigger 31echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' > events/sched/sched_wakeup/trigger
32echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger &>/dev/null 32! echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger 2> /dev/null
33if ! grep -q "ERROR:" events/sched/sched_wakeup/hist; then 33if ! grep -q "ERROR:" events/sched/sched_wakeup/hist; then
34 fail "Failed to generate extended error in histogram" 34 fail "Failed to generate extended error in histogram"
35fi 35fi
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
new file mode 100644
index 000000000000..c193dce611a2
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
@@ -0,0 +1,44 @@
1#!/bin/sh
2# description: event trigger - test multiple actions on hist trigger
3
4
5do_reset() {
6 reset_trigger
7 echo > set_event
8 clear_trace
9}
10
11fail() { #msg
12 do_reset
13 echo $1
14 exit_fail
15}
16
17if [ ! -f set_event ]; then
18 echo "event tracing is not supported"
19 exit_unsupported
20fi
21
22if [ ! -f synthetic_events ]; then
23 echo "synthetic event is not supported"
24 exit_unsupported
25fi
26
27clear_synthetic_events
28reset_tracer
29do_reset
30
31echo "Test multiple actions on hist trigger"
32echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events
33TRIGGER1=events/sched/sched_wakeup/trigger
34TRIGGER2=events/sched/sched_switch/trigger
35
36echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="cyclictest"' > $TRIGGER1
37echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0 if next_comm=="cyclictest"' >> $TRIGGER2
38echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,next_pid) if next_comm=="cyclictest"' >> $TRIGGER2
39echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,prev_pid) if next_comm=="cyclictest"' >> $TRIGGER2
40echo 'hist:keys=next_pid if next_comm=="cyclictest"' >> $TRIGGER2
41
42do_reset
43
44exit 0