-rw-r--r--  Documentation/core-api/kernel-api.rst | 14
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt | 6
-rw-r--r--  Documentation/process/index.rst | 1
-rw-r--r--  Documentation/process/kernel-enforcement-statement.rst | 147
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/arm/Makefile | 2
-rw-r--r--  arch/arm/boot/compressed/debug.S | 4
-rw-r--r--  arch/arm/boot/dts/armada-38x.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/at91-sama5d27_som1.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/at91-sama5d2_xplained.dts | 16
-rw-r--r--  arch/arm/boot/dts/bcm2835-rpi-zero-w.dts | 9
-rw-r--r--  arch/arm/boot/dts/bcm2837-rpi-3-b.dts | 5
-rw-r--r--  arch/arm/boot/dts/bcm283x.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/gemini.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/imx7d.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/moxart.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/sama5d2.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/sun6i-a31.dtsi | 16
-rw-r--r--  arch/arm/kernel/debug.S | 8
-rw-r--r--  arch/arm/mach-ux500/cpu-db8500.c | 4
-rw-r--r--  arch/arm/mach-ux500/pm.c | 4
-rw-r--r--  arch/arm/mm/nommu.c | 5
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts | 9
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/renesas/salvator-common.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3328.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3368.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-firefly.dts | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi | 4
-rw-r--r--  arch/parisc/kernel/parisc_ksyms.c | 2
-rw-r--r--  arch/parisc/kernel/syscall.S | 6
-rw-r--r--  arch/parisc/kernel/time.c | 5
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig | 2
-rw-r--r--  arch/s390/kernel/smp.c | 3
-rw-r--r--  arch/x86/include/asm/io.h | 4
-rw-r--r--  arch/x86/include/asm/tlbflush.h | 21
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 1
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c | 19
-rw-r--r--  arch/x86/kernel/head32.c | 5
-rw-r--r--  arch/x86/mm/mmap.c | 12
-rw-r--r--  arch/x86/mm/tlb.c | 64
-rw-r--r--  crypto/asymmetric_keys/asymmetric_type.c | 4
-rw-r--r--  crypto/asymmetric_keys/pkcs7_parser.c | 3
-rw-r--r--  drivers/block/nbd.c | 2
-rw-r--r--  drivers/block/skd_main.c | 2
-rw-r--r--  drivers/bus/mvebu-mbus.c | 2
-rw-r--r--  drivers/clocksource/cs5535-clockevt.c | 3
-rw-r--r--  drivers/dma/altera-msgdma.c | 4
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 23
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 63
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 14
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 162
-rw-r--r--  drivers/input/input.c | 84
-rw-r--r--  drivers/input/joydev.c | 70
-rw-r--r--  drivers/input/keyboard/tca8418_keypad.c | 29
-rw-r--r--  drivers/input/misc/axp20x-pek.c | 2
-rw-r--r--  drivers/input/misc/ims-pcu.c | 16
-rw-r--r--  drivers/input/mouse/synaptics.c | 3
-rw-r--r--  drivers/input/touchscreen/goodix.c | 67
-rw-r--r--  drivers/input/touchscreen/stmfts.c | 6
-rw-r--r--  drivers/input/touchscreen/ti_am335x_tsc.c | 2
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 43
-rw-r--r--  drivers/irqchip/irq-tango.c | 2
-rw-r--r--  drivers/media/cec/cec-adap.c | 13
-rw-r--r--  drivers/media/dvb-core/dvb_frontend.c | 25
-rw-r--r--  drivers/media/dvb-frontends/dib3000mc.c | 50
-rw-r--r--  drivers/media/dvb-frontends/dvb-pll.c | 22
-rw-r--r--  drivers/media/platform/Kconfig | 2
-rw-r--r--  drivers/media/platform/qcom/camss-8x16/camss-vfe.c | 2
-rw-r--r--  drivers/media/platform/qcom/venus/helpers.c | 1
-rw-r--r--  drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c | 3
-rw-r--r--  drivers/media/platform/s5p-cec/s5p_cec.c | 11
-rw-r--r--  drivers/media/platform/s5p-cec/s5p_cec.h | 2
-rw-r--r--  drivers/media/tuners/mt2060.c | 59
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c | 2
-rw-r--r--  drivers/net/can/flexcan.c | 91
-rw-r--r--  drivers/net/can/usb/esd_usb2.c | 2
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 10
-rw-r--r--  drivers/net/dsa/mv88e6060.c | 10
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 7
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 157
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 39
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 14
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 21
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 89
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 69
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 18
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 99
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 11
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 63
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 11
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 20
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 17
-rw-r--r--  drivers/net/geneve.c | 6
-rw-r--r--  drivers/net/macsec.c | 2
-rw-r--r--  drivers/net/tun.c | 3
-rw-r--r--  drivers/net/wimax/i2400m/fw.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c | 3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c | 197
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/7000.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/8000.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/a000.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.h | 15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-config.h | 16
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 137
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 21
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 2
-rw-r--r--  drivers/net/xen-netback/interface.c | 2
-rw-r--r--  drivers/net/xen-netfront.c | 2
-rw-r--r--  drivers/of/of_mdio.c | 39
-rw-r--r--  drivers/reset/reset-socfpga.c | 17
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 3
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 10
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 3
-rw-r--r--  drivers/staging/media/imx/imx-media-dev.c | 4
-rw-r--r--  fs/btrfs/super.c | 2
-rw-r--r--  fs/crypto/keyinfo.c | 5
-rw-r--r--  fs/direct-io.c | 39
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 24
-rw-r--r--  fs/ecryptfs/keystore.c | 9
-rw-r--r--  fs/exec.c | 1
-rw-r--r--  fs/ext4/super.c | 4
-rw-r--r--  fs/fscache/object-list.c | 7
-rw-r--r--  fs/fuse/inode.c | 2
-rw-r--r--  fs/iomap.c | 41
-rw-r--r--  fs/namespace.c | 3
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 11
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.h | 1
-rw-r--r--  fs/xfs/xfs_aops.c | 47
-rw-r--r--  fs/xfs/xfs_fsmap.c | 48
-rw-r--r--  fs/xfs/xfs_super.c | 2
-rw-r--r--  include/linux/filter.h | 2
-rw-r--r--  include/linux/input.h | 7
-rw-r--r--  include/linux/irq.h | 2
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 2
-rw-r--r--  include/linux/key.h | 47
-rw-r--r--  include/linux/mbus.h | 4
-rw-r--r--  include/linux/mm_types.h | 3
-rw-r--r--  include/linux/mod_devicetable.h | 3
-rw-r--r--  include/linux/netdevice.h | 3
-rw-r--r--  include/linux/rculist.h | 2
-rw-r--r--  include/linux/rcupdate.h | 22
-rw-r--r--  include/linux/sched/mm.h | 16
-rw-r--r--  include/linux/srcu.h | 1
-rw-r--r--  include/net/inet_sock.h | 2
-rw-r--r--  include/net/tcp.h | 5
-rw-r--r--  include/sound/control.h | 3
-rw-r--r--  include/uapi/linux/membarrier.h | 23
-rw-r--r--  kernel/bpf/arraymap.c | 2
-rw-r--r--  kernel/bpf/devmap.c | 10
-rw-r--r--  kernel/bpf/hashtab.c | 4
-rw-r--r--  kernel/bpf/sockmap.c | 28
-rw-r--r--  kernel/bpf/verifier.c | 65
-rw-r--r--  kernel/cpu.c | 5
-rw-r--r--  kernel/exit.c | 4
-rw-r--r--  kernel/irq/generic-chip.c | 15
-rw-r--r--  kernel/rcu/srcutree.c | 2
-rw-r--r--  kernel/rcu/sync.c | 9
-rw-r--r--  kernel/rcu/tree.c | 18
-rw-r--r--  kernel/sched/membarrier.c | 34
-rw-r--r--  lib/digsig.c | 6
-rw-r--r--  lib/ts_fsm.c | 2
-rw-r--r--  lib/ts_kmp.c | 2
-rw-r--r--  mm/memcontrol.c | 15
-rw-r--r--  mm/percpu.c | 15
-rw-r--r--  net/bridge/br_netlink.c | 2
-rw-r--r--  net/can/af_can.c | 20
-rw-r--r--  net/can/bcm.c | 5
-rw-r--r--  net/core/dev.c | 6
-rw-r--r--  net/core/dev_ioctl.c | 13
-rw-r--r--  net/core/ethtool.c | 5
-rw-r--r--  net/core/filter.c | 31
-rw-r--r--  net/core/rtnetlink.c | 13
-rw-r--r--  net/core/skbuff.c | 6
-rw-r--r--  net/core/sock.c | 8
-rw-r--r--  net/core/sock_reuseport.c | 12
-rw-r--r--  net/dccp/ipv4.c | 13
-rw-r--r--  net/dns_resolver/dns_key.c | 2
-rw-r--r--  net/ipv4/Kconfig | 8
-rw-r--r--  net/ipv4/cipso_ipv4.c | 24
-rw-r--r--  net/ipv4/inet_connection_sock.c | 9
-rw-r--r--  net/ipv4/inet_hashtables.c | 5
-rw-r--r--  net/ipv4/syncookies.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 22
-rw-r--r--  net/ipv4/udp.c | 9
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 1
-rw-r--r--  net/ipv6/ip6_output.c | 4
-rw-r--r--  net/l2tp/l2tp_ppp.c | 3
-rw-r--r--  net/mac80211/key.c | 21
-rw-r--r--  net/ncsi/internal.h | 1
-rw-r--r--  net/ncsi/ncsi-aen.c | 2
-rw-r--r--  net/ncsi/ncsi-manage.c | 52
-rw-r--r--  net/ncsi/ncsi-rsp.c | 2
-rw-r--r--  net/netlink/af_netlink.c | 8
-rw-r--r--  net/packet/af_packet.c | 24
-rw-r--r--  net/rxrpc/af_rxrpc.c | 5
-rw-r--r--  net/sched/cls_flower.c | 2
-rw-r--r--  net/sctp/input.c | 2
-rw-r--r--  net/sctp/socket.c | 4
-rw-r--r--  net/vmw_vsock/hyperv_transport.c | 22
-rw-r--r--  samples/sockmap/sockmap_kern.c | 2
-rw-r--r--  samples/trace_events/trace-events-sample.c | 14
-rw-r--r--  security/commoncap.c | 3
-rw-r--r--  security/keys/Kconfig | 1
-rw-r--r--  security/keys/big_key.c | 4
-rw-r--r--  security/keys/encrypted-keys/encrypted.c | 9
-rw-r--r--  security/keys/gc.c | 8
-rw-r--r--  security/keys/key.c | 41
-rw-r--r--  security/keys/keyctl.c | 9
-rw-r--r--  security/keys/keyring.c | 14
-rw-r--r--  security/keys/permission.c | 7
-rw-r--r--  security/keys/proc.c | 31
-rw-r--r--  security/keys/process_keys.c | 2
-rw-r--r--  security/keys/request_key.c | 7
-rw-r--r--  security/keys/request_key_auth.c | 2
-rw-r--r--  security/keys/trusted.c | 2
-rw-r--r--  security/keys/user_defined.c | 4
-rw-r--r--  sound/core/seq/seq_lock.c | 4
-rw-r--r--  sound/core/seq/seq_lock.h | 12
-rw-r--r--  sound/core/vmaster.c | 31
-rw-r--r--  sound/hda/hdac_controller.c | 5
-rw-r--r--  sound/pci/hda/hda_codec.c | 97
-rw-r--r--  sound/usb/quirks.c | 1
-rw-r--r--  tools/include/uapi/linux/bpf.h | 3
-rw-r--r--  tools/objtool/check.c | 9
-rw-r--r--  tools/perf/Documentation/perf-record.txt | 4
-rwxr-xr-x  tools/perf/tests/shell/trace+probe_libc_inet_pton.sh | 9
-rw-r--r--  tools/perf/ui/hist.c | 9
-rw-r--r--  tools/perf/util/parse-events.l | 17
-rw-r--r--  tools/perf/util/session.c | 2
-rw-r--r--  tools/perf/util/xyarray.h | 4
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 10
-rw-r--r--  tools/testing/selftests/bpf/bpf_helpers.h | 2
-rw-r--r--  tools/testing/selftests/bpf/sockmap_verdict_prog.c | 4
-rw-r--r--  tools/testing/selftests/bpf/test_maps.c | 12
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 510
281 files changed, 3304 insertions, 1361 deletions
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 8282099e0cbf..5da10184d908 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -352,44 +352,30 @@ Read-Copy Update (RCU)
352---------------------- 352----------------------
353 353
354.. kernel-doc:: include/linux/rcupdate.h 354.. kernel-doc:: include/linux/rcupdate.h
355 :external:
356 355
357.. kernel-doc:: include/linux/rcupdate_wait.h 356.. kernel-doc:: include/linux/rcupdate_wait.h
358 :external:
359 357
360.. kernel-doc:: include/linux/rcutree.h 358.. kernel-doc:: include/linux/rcutree.h
361 :external:
362 359
363.. kernel-doc:: kernel/rcu/tree.c 360.. kernel-doc:: kernel/rcu/tree.c
364 :external:
365 361
366.. kernel-doc:: kernel/rcu/tree_plugin.h 362.. kernel-doc:: kernel/rcu/tree_plugin.h
367 :external:
368 363
369.. kernel-doc:: kernel/rcu/tree_exp.h 364.. kernel-doc:: kernel/rcu/tree_exp.h
370 :external:
371 365
372.. kernel-doc:: kernel/rcu/update.c 366.. kernel-doc:: kernel/rcu/update.c
373 :external:
374 367
375.. kernel-doc:: include/linux/srcu.h 368.. kernel-doc:: include/linux/srcu.h
376 :external:
377 369
378.. kernel-doc:: kernel/rcu/srcutree.c 370.. kernel-doc:: kernel/rcu/srcutree.c
379 :external:
380 371
381.. kernel-doc:: include/linux/rculist_bl.h 372.. kernel-doc:: include/linux/rculist_bl.h
382 :external:
383 373
384.. kernel-doc:: include/linux/rculist.h 374.. kernel-doc:: include/linux/rculist.h
385 :external:
386 375
387.. kernel-doc:: include/linux/rculist_nulls.h 376.. kernel-doc:: include/linux/rculist_nulls.h
388 :external:
389 377
390.. kernel-doc:: include/linux/rcu_sync.h 378.. kernel-doc:: include/linux/rcu_sync.h
391 :external:
392 379
393.. kernel-doc:: kernel/rcu/sync.c 380.. kernel-doc:: kernel/rcu/sync.c
394 :external:
395 381
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 4c29cdab0ea5..5eb108e180fa 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -99,7 +99,7 @@ Examples:
99 compatible = "arm,gic-v3-its"; 99 compatible = "arm,gic-v3-its";
100 msi-controller; 100 msi-controller;
101 #msi-cells = <1>; 101 #msi-cells = <1>;
102 reg = <0x0 0x2c200000 0 0x200000>; 102 reg = <0x0 0x2c200000 0 0x20000>;
103 }; 103 };
104 }; 104 };
105 105
@@ -124,14 +124,14 @@ Examples:
124 compatible = "arm,gic-v3-its"; 124 compatible = "arm,gic-v3-its";
125 msi-controller; 125 msi-controller;
126 #msi-cells = <1>; 126 #msi-cells = <1>;
127 reg = <0x0 0x2c200000 0 0x200000>; 127 reg = <0x0 0x2c200000 0 0x20000>;
128 }; 128 };
129 129
130 gic-its@2c400000 { 130 gic-its@2c400000 {
131 compatible = "arm,gic-v3-its"; 131 compatible = "arm,gic-v3-its";
132 msi-controller; 132 msi-controller;
133 #msi-cells = <1>; 133 #msi-cells = <1>;
134 reg = <0x0 0x2c400000 0 0x200000>; 134 reg = <0x0 0x2c400000 0 0x20000>;
135 }; 135 };
136 136
137 ppi-partitions { 137 ppi-partitions {
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 82fc399fcd33..61e43cc3ed17 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -25,6 +25,7 @@ Below are the essential guides that every developer should read.
25 submitting-patches 25 submitting-patches
26 coding-style 26 coding-style
27 email-clients 27 email-clients
28 kernel-enforcement-statement
28 29
29Other guides to the community that are of interest to most developers are: 30Other guides to the community that are of interest to most developers are:
30 31
diff --git a/Documentation/process/kernel-enforcement-statement.rst b/Documentation/process/kernel-enforcement-statement.rst
new file mode 100644
index 000000000000..1e23d4227337
--- /dev/null
+++ b/Documentation/process/kernel-enforcement-statement.rst
@@ -0,0 +1,147 @@
1Linux Kernel Enforcement Statement
2----------------------------------
3
4As developers of the Linux kernel, we have a keen interest in how our software
5is used and how the license for our software is enforced. Compliance with the
6reciprocal sharing obligations of GPL-2.0 is critical to the long-term
7sustainability of our software and community.
8
9Although there is a right to enforce the separate copyright interests in the
10contributions made to our community, we share an interest in ensuring that
11individual enforcement actions are conducted in a manner that benefits our
12community and do not have an unintended negative impact on the health and
13growth of our software ecosystem. In order to deter unhelpful enforcement
14actions, we agree that it is in the best interests of our development
15community to undertake the following commitment to users of the Linux kernel
16on behalf of ourselves and any successors to our copyright interests:
17
18 Notwithstanding the termination provisions of the GPL-2.0, we agree that
19 it is in the best interests of our development community to adopt the
20 following provisions of GPL-3.0 as additional permissions under our
21 license with respect to any non-defensive assertion of rights under the
22 license.
23
24 However, if you cease all violation of this License, then your license
25 from a particular copyright holder is reinstated (a) provisionally,
26 unless and until the copyright holder explicitly and finally
27 terminates your license, and (b) permanently, if the copyright holder
28 fails to notify you of the violation by some reasonable means prior to
29 60 days after the cessation.
30
31 Moreover, your license from a particular copyright holder is
32 reinstated permanently if the copyright holder notifies you of the
33 violation by some reasonable means, this is the first time you have
34 received notice of violation of this License (for any work) from that
35 copyright holder, and you cure the violation prior to 30 days after
36 your receipt of the notice.
37
38Our intent in providing these assurances is to encourage more use of the
39software. We want companies and individuals to use, modify and distribute
40this software. We want to work with users in an open and transparent way to
41eliminate any uncertainty about our expectations regarding compliance or
42enforcement that might limit adoption of our software. We view legal action
43as a last resort, to be initiated only when other community efforts have
44failed to resolve the problem.
45
46Finally, once a non-compliance issue is resolved, we hope the user will feel
47welcome to join us in our efforts on this project. Working together, we will
48be stronger.
49
50Except where noted below, we speak only for ourselves, and not for any company
51we might work for today, have in the past, or will in the future.
52
53 - Bjorn Andersson (Linaro)
54 - Andrea Arcangeli (Red Hat)
55 - Neil Armstrong
56 - Jens Axboe
57 - Pablo Neira Ayuso
58 - Khalid Aziz
59 - Ralf Baechle
60 - Felipe Balbi
61 - Arnd Bergmann
62 - Ard Biesheuvel
63 - Paolo Bonzini (Red Hat)
64 - Christian Borntraeger
65 - Mark Brown (Linaro)
66 - Paul Burton
67 - Javier Martinez Canillas
68 - Rob Clark
69 - Jonathan Corbet
70 - Vivien Didelot (Savoir-faire Linux)
71 - Hans de Goede (Red Hat)
72 - Mel Gorman (SUSE)
73 - Sven Eckelmann
74 - Alex Elder (Linaro)
75 - Fabio Estevam
76 - Larry Finger
77 - Bhumika Goyal
78 - Andy Gross
79 - Juergen Gross
80 - Shawn Guo
81 - Ulf Hansson
82 - Tejun Heo
83 - Rob Herring
84 - Masami Hiramatsu
85 - Michal Hocko
86 - Simon Horman
87 - Johan Hovold (Hovold Consulting AB)
88 - Christophe JAILLET
89 - Olof Johansson
90 - Lee Jones (Linaro)
91 - Heiner Kallweit
92 - Srinivas Kandagatla
93 - Jan Kara
94 - Shuah Khan (Samsung)
95 - David Kershner
96 - Jaegeuk Kim
97 - Namhyung Kim
98 - Colin Ian King
99 - Jeff Kirsher
100 - Greg Kroah-Hartman (Linux Foundation)
101 - Christian König
102 - Vinod Koul
103 - Krzysztof Kozlowski
104 - Viresh Kumar
105 - Aneesh Kumar K.V
106 - Julia Lawall
107 - Doug Ledford (Red Hat)
108 - Chuck Lever (Oracle)
109 - Daniel Lezcano
110 - Shaohua Li
111 - Xin Long (Red Hat)
112 - Tony Luck
113 - Mike Marshall
114 - Chris Mason
115 - Paul E. McKenney
116 - David S. Miller
117 - Ingo Molnar
118 - Kuninori Morimoto
119 - Borislav Petkov
120 - Jiri Pirko
121 - Josh Poimboeuf
122 - Sebastian Reichel (Collabora)
123 - Guenter Roeck
124 - Joerg Roedel
125 - Leon Romanovsky
126 - Steven Rostedt (VMware)
127 - Ivan Safonov
128 - Ivan Safonov
129 - Anna Schumaker
130 - Jes Sorensen
131 - K.Y. Srinivasan
132 - Heiko Stuebner
133 - Jiri Kosina (SUSE)
134 - Dmitry Torokhov
135 - Linus Torvalds
136 - Thierry Reding
137 - Rik van Riel
138 - Geert Uytterhoeven (Glider bvba)
139 - Daniel Vetter
140 - Linus Walleij
141 - Richard Weinberger
142 - Dan Williams
143 - Rafael J. Wysocki
144 - Arvind Yadav
145 - Masahiro Yamada
146 - Wei Yongjun
147 - Lv Zheng
diff --git a/MAINTAINERS b/MAINTAINERS
index 1f33ae356003..d85c08956875 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10559,6 +10559,8 @@ M: Peter Zijlstra <peterz@infradead.org>
10559M: Ingo Molnar <mingo@redhat.com> 10559M: Ingo Molnar <mingo@redhat.com>
10560M: Arnaldo Carvalho de Melo <acme@kernel.org> 10560M: Arnaldo Carvalho de Melo <acme@kernel.org>
10561R: Alexander Shishkin <alexander.shishkin@linux.intel.com> 10561R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
10562R: Jiri Olsa <jolsa@redhat.com>
10563R: Namhyung Kim <namhyung@kernel.org>
10562L: linux-kernel@vger.kernel.org 10564L: linux-kernel@vger.kernel.org
10563T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core 10565T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
10564S: Supported 10566S: Supported
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 47d3a1ab08d2..817e5cfef83a 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -131,7 +131,7 @@ endif
131KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm 131KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
132KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float 132KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
133 133
134CHECKFLAGS += -D__arm__ 134CHECKFLAGS += -D__arm__ -m32
135 135
136#Default value 136#Default value
137head-y := arch/arm/kernel/head$(MMUEXT).o 137head-y := arch/arm/kernel/head$(MMUEXT).o
diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S
index 5392ee63338f..8f6e37177de1 100644
--- a/arch/arm/boot/compressed/debug.S
+++ b/arch/arm/boot/compressed/debug.S
@@ -23,7 +23,11 @@ ENTRY(putc)
23 strb r0, [r1] 23 strb r0, [r1]
24 mov r0, #0x03 @ SYS_WRITEC 24 mov r0, #0x03 @ SYS_WRITEC
25 ARM( svc #0x123456 ) 25 ARM( svc #0x123456 )
26#ifdef CONFIG_CPU_V7M
27 THUMB( bkpt #0xab )
28#else
26 THUMB( svc #0xab ) 29 THUMB( svc #0xab )
30#endif
27 mov pc, lr 31 mov pc, lr
28 .align 2 32 .align 2
291: .word _GLOBAL_OFFSET_TABLE_ - . 331: .word _GLOBAL_OFFSET_TABLE_ - .
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 7ff0811e61db..4960722aab32 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -178,7 +178,7 @@
178 }; 178 };
179 179
180 i2c0: i2c@11000 { 180 i2c0: i2c@11000 {
181 compatible = "marvell,mv64xxx-i2c"; 181 compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
182 reg = <0x11000 0x20>; 182 reg = <0x11000 0x20>;
183 #address-cells = <1>; 183 #address-cells = <1>;
184 #size-cells = <0>; 184 #size-cells = <0>;
@@ -189,7 +189,7 @@
189 }; 189 };
190 190
191 i2c1: i2c@11100 { 191 i2c1: i2c@11100 {
192 compatible = "marvell,mv64xxx-i2c"; 192 compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
193 reg = <0x11100 0x20>; 193 reg = <0x11100 0x20>;
194 #address-cells = <1>; 194 #address-cells = <1>;
195 #size-cells = <0>; 195 #size-cells = <0>;
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
index 63a5af898165..cf0087b4c9e1 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
@@ -67,8 +67,8 @@
67 pinctrl-0 = <&pinctrl_macb0_default>; 67 pinctrl-0 = <&pinctrl_macb0_default>;
68 phy-mode = "rmii"; 68 phy-mode = "rmii";
69 69
70 ethernet-phy@1 { 70 ethernet-phy@0 {
71 reg = <0x1>; 71 reg = <0x0>;
72 interrupt-parent = <&pioA>; 72 interrupt-parent = <&pioA>;
73 interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>; 73 interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
74 pinctrl-names = "default"; 74 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index c7e9ccf2bc87..cbc26001247b 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -309,7 +309,7 @@
309 vddana-supply = <&vdd_3v3_lp_reg>; 309 vddana-supply = <&vdd_3v3_lp_reg>;
310 vref-supply = <&vdd_3v3_lp_reg>; 310 vref-supply = <&vdd_3v3_lp_reg>;
311 pinctrl-names = "default"; 311 pinctrl-names = "default";
312 pinctrl-0 = <&pinctrl_adc_default>; 312 pinctrl-0 = <&pinctrl_adc_default &pinctrl_adtrg_default>;
313 status = "okay"; 313 status = "okay";
314 }; 314 };
315 315
@@ -340,6 +340,20 @@
340 bias-disable; 340 bias-disable;
341 }; 341 };
342 342
343 /*
344 * The ADTRG pin can work on any edge type.
345 * In here it's being pulled up, so need to
346 * connect it to ground to get an edge e.g.
347 * Trigger can be configured on falling, rise
348 * or any edge, and the pull-up can be changed
349 * to pull-down or left floating according to
350 * needs.
351 */
352 pinctrl_adtrg_default: adtrg_default {
353 pinmux = <PIN_PD31__ADTRG>;
354 bias-pull-up;
355 };
356
343 pinctrl_charger_chglev: charger_chglev { 357 pinctrl_charger_chglev: charger_chglev {
344 pinmux = <PIN_PA12__GPIO>; 358 pinmux = <PIN_PA12__GPIO>;
345 bias-disable; 359 bias-disable;
diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
index 82651c3eb682..b8565fc33eea 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
@@ -18,12 +18,9 @@
18 compatible = "raspberrypi,model-zero-w", "brcm,bcm2835"; 18 compatible = "raspberrypi,model-zero-w", "brcm,bcm2835";
19 model = "Raspberry Pi Zero W"; 19 model = "Raspberry Pi Zero W";
20 20
21 /* Needed by firmware to properly init UARTs */ 21 chosen {
22 aliases { 22 /* 8250 auxiliary UART instead of pl011 */
23 uart0 = "/soc/serial@7e201000"; 23 stdout-path = "serial1:115200n8";
24 uart1 = "/soc/serial@7e215040";
25 serial0 = "/soc/serial@7e201000";
26 serial1 = "/soc/serial@7e215040";
27 }; 24 };
28 25
29 leds { 26 leds {
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
index 20725ca487f3..c71a0d73d2a2 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
@@ -8,6 +8,11 @@
8 compatible = "raspberrypi,3-model-b", "brcm,bcm2837"; 8 compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
9 model = "Raspberry Pi 3 Model B"; 9 model = "Raspberry Pi 3 Model B";
10 10
11 chosen {
12 /* 8250 auxiliary UART instead of pl011 */
13 stdout-path = "serial1:115200n8";
14 };
15
11 memory { 16 memory {
12 reg = <0 0x40000000>; 17 reg = <0 0x40000000>;
13 }; 18 };
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 431dcfc900c0..013431e3d7c3 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -20,8 +20,13 @@
20 #address-cells = <1>; 20 #address-cells = <1>;
21 #size-cells = <1>; 21 #size-cells = <1>;
22 22
23 aliases {
24 serial0 = &uart0;
25 serial1 = &uart1;
26 };
27
23 chosen { 28 chosen {
24 bootargs = "earlyprintk console=ttyAMA0"; 29 stdout-path = "serial0:115200n8";
25 }; 30 };
26 31
27 thermal-zones { 32 thermal-zones {
diff --git a/arch/arm/boot/dts/gemini.dtsi b/arch/arm/boot/dts/gemini.dtsi
index c68e8d430234..f0d178c77153 100644
--- a/arch/arm/boot/dts/gemini.dtsi
+++ b/arch/arm/boot/dts/gemini.dtsi
@@ -145,11 +145,12 @@
145 }; 145 };
146 146
147 watchdog@41000000 { 147 watchdog@41000000 {
148 compatible = "cortina,gemini-watchdog"; 148 compatible = "cortina,gemini-watchdog", "faraday,ftwdt010";
149 reg = <0x41000000 0x1000>; 149 reg = <0x41000000 0x1000>;
150 interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; 150 interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
151 resets = <&syscon GEMINI_RESET_WDOG>; 151 resets = <&syscon GEMINI_RESET_WDOG>;
152 clocks = <&syscon GEMINI_CLK_APB>; 152 clocks = <&syscon GEMINI_CLK_APB>;
153 clock-names = "PCLK";
153 }; 154 };
154 155
155 uart0: serial@42000000 { 156 uart0: serial@42000000 {
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index f46814a7ea44..4d308d17f040 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -144,10 +144,10 @@
144 interrupt-names = "msi"; 144 interrupt-names = "msi";
145 #interrupt-cells = <1>; 145 #interrupt-cells = <1>;
146 interrupt-map-mask = <0 0 0 0x7>; 146 interrupt-map-mask = <0 0 0 0x7>;
147 interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>, 147 interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
148 <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, 148 <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
149 <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, 149 <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
150 <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>; 150 <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
151 clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>, 151 clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
152 <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>, 152 <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
153 <&clks IMX7D_PCIE_PHY_ROOT_CLK>; 153 <&clks IMX7D_PCIE_PHY_ROOT_CLK>;
diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi
index 1f4c795d3f72..da7b3237bfe9 100644
--- a/arch/arm/boot/dts/moxart.dtsi
+++ b/arch/arm/boot/dts/moxart.dtsi
@@ -87,9 +87,10 @@
87 }; 87 };
88 88
89 watchdog: watchdog@98500000 { 89 watchdog: watchdog@98500000 {
90 compatible = "moxa,moxart-watchdog"; 90 compatible = "moxa,moxart-watchdog", "faraday,ftwdt010";
91 reg = <0x98500000 0x10>; 91 reg = <0x98500000 0x10>;
92 clocks = <&clk_apb>; 92 clocks = <&clk_apb>;
93 clock-names = "PCLK";
93 }; 94 };
94 95
95 sdhci: sdhci@98e00000 { 96 sdhci: sdhci@98e00000 {
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 38d2216c7ead..b1a26b42d190 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -1430,6 +1430,7 @@
1430 atmel,min-sample-rate-hz = <200000>; 1430 atmel,min-sample-rate-hz = <200000>;
1431 atmel,max-sample-rate-hz = <20000000>; 1431 atmel,max-sample-rate-hz = <20000000>;
1432 atmel,startup-time-ms = <4>; 1432 atmel,startup-time-ms = <4>;
1433 atmel,trigger-edge-type = <IRQ_TYPE_EDGE_RISING>;
1433 status = "disabled"; 1434 status = "disabled";
1434 }; 1435 };
1435 1436
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index b147cb0dc14b..eef072a21acc 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -311,8 +311,8 @@
311 #size-cells = <0>; 311 #size-cells = <0>;
312 reg = <0>; 312 reg = <0>;
313 313
314 tcon1_in_drc1: endpoint@0 { 314 tcon1_in_drc1: endpoint@1 {
315 reg = <0>; 315 reg = <1>;
316 remote-endpoint = <&drc1_out_tcon1>; 316 remote-endpoint = <&drc1_out_tcon1>;
317 }; 317 };
318 }; 318 };
@@ -1012,8 +1012,8 @@
1012 #size-cells = <0>; 1012 #size-cells = <0>;
1013 reg = <1>; 1013 reg = <1>;
1014 1014
1015 be1_out_drc1: endpoint@0 { 1015 be1_out_drc1: endpoint@1 {
1016 reg = <0>; 1016 reg = <1>;
1017 remote-endpoint = <&drc1_in_be1>; 1017 remote-endpoint = <&drc1_in_be1>;
1018 }; 1018 };
1019 }; 1019 };
@@ -1042,8 +1042,8 @@
1042 #size-cells = <0>; 1042 #size-cells = <0>;
1043 reg = <0>; 1043 reg = <0>;
1044 1044
1045 drc1_in_be1: endpoint@0 { 1045 drc1_in_be1: endpoint@1 {
1046 reg = <0>; 1046 reg = <1>;
1047 remote-endpoint = <&be1_out_drc1>; 1047 remote-endpoint = <&be1_out_drc1>;
1048 }; 1048 };
1049 }; 1049 };
@@ -1053,8 +1053,8 @@
1053 #size-cells = <0>; 1053 #size-cells = <0>;
1054 reg = <1>; 1054 reg = <1>;
1055 1055
1056 drc1_out_tcon1: endpoint@0 { 1056 drc1_out_tcon1: endpoint@1 {
1057 reg = <0>; 1057 reg = <1>;
1058 remote-endpoint = <&tcon1_in_drc1>; 1058 remote-endpoint = <&tcon1_in_drc1>;
1059 }; 1059 };
1060 }; 1060 };
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index ea9646cc2a0e..0a498cb3fad8 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -115,7 +115,11 @@ ENTRY(printascii)
115 mov r1, r0 115 mov r1, r0
116 mov r0, #0x04 @ SYS_WRITE0 116 mov r0, #0x04 @ SYS_WRITE0
117 ARM( svc #0x123456 ) 117 ARM( svc #0x123456 )
118#ifdef CONFIG_CPU_V7M
119 THUMB( bkpt #0xab )
120#else
118 THUMB( svc #0xab ) 121 THUMB( svc #0xab )
122#endif
119 ret lr 123 ret lr
120ENDPROC(printascii) 124ENDPROC(printascii)
121 125
@@ -124,7 +128,11 @@ ENTRY(printch)
124 strb r0, [r1] 128 strb r0, [r1]
125 mov r0, #0x03 @ SYS_WRITEC 129 mov r0, #0x03 @ SYS_WRITEC
126 ARM( svc #0x123456 ) 130 ARM( svc #0x123456 )
131#ifdef CONFIG_CPU_V7M
132 THUMB( bkpt #0xab )
133#else
127 THUMB( svc #0xab ) 134 THUMB( svc #0xab )
135#endif
128 ret lr 136 ret lr
129ENDPROC(printch) 137ENDPROC(printch)
130 138
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 71a34e8c345a..57058ac46f49 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -32,6 +32,7 @@
32#include <asm/mach/arch.h> 32#include <asm/mach/arch.h>
33 33
34#include "db8500-regs.h" 34#include "db8500-regs.h"
35#include "pm_domains.h"
35 36
36static int __init ux500_l2x0_unlock(void) 37static int __init ux500_l2x0_unlock(void)
37{ 38{
@@ -157,6 +158,9 @@ static const struct of_device_id u8500_local_bus_nodes[] = {
157 158
158static void __init u8500_init_machine(void) 159static void __init u8500_init_machine(void)
159{ 160{
161 /* Initialize ux500 power domains */
162 ux500_pm_domains_init();
163
160 /* automatically probe child nodes of dbx5x0 devices */ 164 /* automatically probe child nodes of dbx5x0 devices */
161 if (of_machine_is_compatible("st-ericsson,u8540")) 165 if (of_machine_is_compatible("st-ericsson,u8540"))
162 of_platform_populate(NULL, u8500_local_bus_nodes, 166 of_platform_populate(NULL, u8500_local_bus_nodes,
diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
index a970e7fcba9e..f6c33a0c1c61 100644
--- a/arch/arm/mach-ux500/pm.c
+++ b/arch/arm/mach-ux500/pm.c
@@ -19,7 +19,6 @@
19#include <linux/of_address.h> 19#include <linux/of_address.h>
20 20
21#include "db8500-regs.h" 21#include "db8500-regs.h"
22#include "pm_domains.h"
23 22
24/* ARM WFI Standby signal register */ 23/* ARM WFI Standby signal register */
25#define PRCM_ARM_WFI_STANDBY (prcmu_base + 0x130) 24#define PRCM_ARM_WFI_STANDBY (prcmu_base + 0x130)
@@ -203,7 +202,4 @@ void __init ux500_pm_init(u32 phy_base, u32 size)
203 202
204 /* Set up ux500 suspend callbacks. */ 203 /* Set up ux500 suspend callbacks. */
205 suspend_set_ops(UX500_SUSPEND_OPS); 204 suspend_set_ops(UX500_SUSPEND_OPS);
206
207 /* Initialize ux500 power domains */
208 ux500_pm_domains_init();
209} 205}
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 3b8e728cc944..91537d90f5f5 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -344,6 +344,11 @@ void __init arm_mm_memblock_reserve(void)
344 * reserved here. 344 * reserved here.
345 */ 345 */
346#endif 346#endif
347 /*
348 * In any case, always ensure address 0 is never used as many things
349 * get very confused if 0 is returned as a legitimate address.
350 */
351 memblock_reserve(0, 1);
347} 352}
348 353
349void __init adjust_lowmem_bounds(void) 354void __init adjust_lowmem_bounds(void)
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index caf8b6fbe5e3..d06e34b5d192 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -61,13 +61,6 @@
61 chosen { 61 chosen {
62 stdout-path = "serial0:115200n8"; 62 stdout-path = "serial0:115200n8";
63 }; 63 };
64
65 reg_vcc3v3: vcc3v3 {
66 compatible = "regulator-fixed";
67 regulator-name = "vcc3v3";
68 regulator-min-microvolt = <3300000>;
69 regulator-max-microvolt = <3300000>;
70 };
71}; 64};
72 65
73&ehci0 { 66&ehci0 {
@@ -91,7 +84,7 @@
91&mmc0 { 84&mmc0 {
92 pinctrl-names = "default"; 85 pinctrl-names = "default";
93 pinctrl-0 = <&mmc0_pins>; 86 pinctrl-0 = <&mmc0_pins>;
94 vmmc-supply = <&reg_vcc3v3>; 87 vmmc-supply = <&reg_dcdc1>;
95 cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; 88 cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
96 cd-inverted; 89 cd-inverted;
97 disable-wp; 90 disable-wp;
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index 8263a8a504a8..f2aa2a81de4d 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -336,7 +336,7 @@
336 /* non-prefetchable memory */ 336 /* non-prefetchable memory */
337 0x82000000 0 0xf6000000 0 0xf6000000 0 0xf00000>; 337 0x82000000 0 0xf6000000 0 0xf6000000 0 0xf00000>;
338 interrupt-map-mask = <0 0 0 0>; 338 interrupt-map-mask = <0 0 0 0>;
339 interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; 339 interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
340 interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; 340 interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
341 num-lanes = <1>; 341 num-lanes = <1>;
342 clocks = <&cpm_clk 1 13>; 342 clocks = <&cpm_clk 1 13>;
@@ -362,7 +362,7 @@
362 /* non-prefetchable memory */ 362 /* non-prefetchable memory */
363 0x82000000 0 0xf7000000 0 0xf7000000 0 0xf00000>; 363 0x82000000 0 0xf7000000 0 0xf7000000 0 0xf00000>;
364 interrupt-map-mask = <0 0 0 0>; 364 interrupt-map-mask = <0 0 0 0>;
365 interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; 365 interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
366 interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; 366 interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
367 367
368 num-lanes = <1>; 368 num-lanes = <1>;
@@ -389,7 +389,7 @@
389 /* non-prefetchable memory */ 389 /* non-prefetchable memory */
390 0x82000000 0 0xf8000000 0 0xf8000000 0 0xf00000>; 390 0x82000000 0 0xf8000000 0 0xf8000000 0 0xf00000>;
391 interrupt-map-mask = <0 0 0 0>; 391 interrupt-map-mask = <0 0 0 0>;
392 interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; 392 interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
393 interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; 393 interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
394 394
395 num-lanes = <1>; 395 num-lanes = <1>;
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index b71ee6c83668..4fe70323abb3 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -335,7 +335,7 @@
335 /* non-prefetchable memory */ 335 /* non-prefetchable memory */
336 0x82000000 0 0xfa000000 0 0xfa000000 0 0xf00000>; 336 0x82000000 0 0xfa000000 0 0xfa000000 0 0xf00000>;
337 interrupt-map-mask = <0 0 0 0>; 337 interrupt-map-mask = <0 0 0 0>;
338 interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; 338 interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
339 interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; 339 interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
340 num-lanes = <1>; 340 num-lanes = <1>;
341 clocks = <&cps_clk 1 13>; 341 clocks = <&cps_clk 1 13>;
@@ -361,7 +361,7 @@
361 /* non-prefetchable memory */ 361 /* non-prefetchable memory */
362 0x82000000 0 0xfb000000 0 0xfb000000 0 0xf00000>; 362 0x82000000 0 0xfb000000 0 0xfb000000 0 0xf00000>;
363 interrupt-map-mask = <0 0 0 0>; 363 interrupt-map-mask = <0 0 0 0>;
364 interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; 364 interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
365 interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; 365 interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
366 366
367 num-lanes = <1>; 367 num-lanes = <1>;
@@ -388,7 +388,7 @@
388 /* non-prefetchable memory */ 388 /* non-prefetchable memory */
389 0x82000000 0 0xfc000000 0 0xfc000000 0 0xf00000>; 389 0x82000000 0 0xfc000000 0 0xfc000000 0 0xf00000>;
390 interrupt-map-mask = <0 0 0 0>; 390 interrupt-map-mask = <0 0 0 0>;
391 interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; 391 interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
392 interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; 392 interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
393 393
394 num-lanes = <1>; 394 num-lanes = <1>;
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index 4786c67b5e65..d9d885006a8e 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -62,6 +62,7 @@
62 brightness-levels = <256 128 64 16 8 4 0>; 62 brightness-levels = <256 128 64 16 8 4 0>;
63 default-brightness-level = <6>; 63 default-brightness-level = <6>;
64 64
65 power-supply = <&reg_12v>;
65 enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>; 66 enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
66 }; 67 };
67 68
@@ -83,6 +84,15 @@
83 regulator-always-on; 84 regulator-always-on;
84 }; 85 };
85 86
87 reg_12v: regulator2 {
88 compatible = "regulator-fixed";
89 regulator-name = "fixed-12V";
90 regulator-min-microvolt = <12000000>;
91 regulator-max-microvolt = <12000000>;
92 regulator-boot-on;
93 regulator-always-on;
94 };
95
86 rsnd_ak4613: sound { 96 rsnd_ak4613: sound {
87 compatible = "simple-audio-card"; 97 compatible = "simple-audio-card";
88 98
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 6d615cb6e64d..41d61840fb99 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -582,7 +582,7 @@
582 vop_mmu: iommu@ff373f00 { 582 vop_mmu: iommu@ff373f00 {
583 compatible = "rockchip,iommu"; 583 compatible = "rockchip,iommu";
584 reg = <0x0 0xff373f00 0x0 0x100>; 584 reg = <0x0 0xff373f00 0x0 0x100>;
585 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH 0>; 585 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
586 interrupt-names = "vop_mmu"; 586 interrupt-names = "vop_mmu";
587 #iommu-cells = <0>; 587 #iommu-cells = <0>;
588 status = "disabled"; 588 status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 19fbaa5e7bdd..1070c8264c13 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -740,7 +740,7 @@
740 iep_mmu: iommu@ff900800 { 740 iep_mmu: iommu@ff900800 {
741 compatible = "rockchip,iommu"; 741 compatible = "rockchip,iommu";
742 reg = <0x0 0xff900800 0x0 0x100>; 742 reg = <0x0 0xff900800 0x0 0x100>;
743 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>; 743 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
744 interrupt-names = "iep_mmu"; 744 interrupt-names = "iep_mmu";
745 #iommu-cells = <0>; 745 #iommu-cells = <0>;
746 status = "disabled"; 746 status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
index 7fd4bfcaa38e..fef82274a39d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
@@ -371,10 +371,10 @@
371 regulator-always-on; 371 regulator-always-on;
372 regulator-boot-on; 372 regulator-boot-on;
373 regulator-min-microvolt = <1800000>; 373 regulator-min-microvolt = <1800000>;
374 regulator-max-microvolt = <3300000>; 374 regulator-max-microvolt = <3000000>;
375 regulator-state-mem { 375 regulator-state-mem {
376 regulator-on-in-suspend; 376 regulator-on-in-suspend;
377 regulator-suspend-microvolt = <3300000>; 377 regulator-suspend-microvolt = <3000000>;
378 }; 378 };
379 }; 379 };
380 380
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 53ff3d191a1d..910628d18add 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -325,12 +325,12 @@
325 vcc_sd: LDO_REG4 { 325 vcc_sd: LDO_REG4 {
326 regulator-name = "vcc_sd"; 326 regulator-name = "vcc_sd";
327 regulator-min-microvolt = <1800000>; 327 regulator-min-microvolt = <1800000>;
328 regulator-max-microvolt = <3300000>; 328 regulator-max-microvolt = <3000000>;
329 regulator-always-on; 329 regulator-always-on;
330 regulator-boot-on; 330 regulator-boot-on;
331 regulator-state-mem { 331 regulator-state-mem {
332 regulator-on-in-suspend; 332 regulator-on-in-suspend;
333 regulator-suspend-microvolt = <3300000>; 333 regulator-suspend-microvolt = <3000000>;
334 }; 334 };
335 }; 335 };
336 336
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
index 6c30bb02210d..0f873c897d0d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
@@ -315,10 +315,10 @@
315 regulator-always-on; 315 regulator-always-on;
316 regulator-boot-on; 316 regulator-boot-on;
317 regulator-min-microvolt = <1800000>; 317 regulator-min-microvolt = <1800000>;
318 regulator-max-microvolt = <3300000>; 318 regulator-max-microvolt = <3000000>;
319 regulator-state-mem { 319 regulator-state-mem {
320 regulator-on-in-suspend; 320 regulator-on-in-suspend;
321 regulator-suspend-microvolt = <3300000>; 321 regulator-suspend-microvolt = <3000000>;
322 }; 322 };
323 }; 323 };
324 324
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index c6d6272a934f..7baa2265d439 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -35,12 +35,12 @@ EXPORT_SYMBOL(memset);
35EXPORT_SYMBOL(__xchg8); 35EXPORT_SYMBOL(__xchg8);
36EXPORT_SYMBOL(__xchg32); 36EXPORT_SYMBOL(__xchg32);
37EXPORT_SYMBOL(__cmpxchg_u32); 37EXPORT_SYMBOL(__cmpxchg_u32);
38EXPORT_SYMBOL(__cmpxchg_u64);
38#ifdef CONFIG_SMP 39#ifdef CONFIG_SMP
39EXPORT_SYMBOL(__atomic_hash); 40EXPORT_SYMBOL(__atomic_hash);
40#endif 41#endif
41#ifdef CONFIG_64BIT 42#ifdef CONFIG_64BIT
42EXPORT_SYMBOL(__xchg64); 43EXPORT_SYMBOL(__xchg64);
43EXPORT_SYMBOL(__cmpxchg_u64);
44#endif 44#endif
45 45
46#include <linux/uaccess.h> 46#include <linux/uaccess.h>
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 23de307c3052..41e60a9c7db2 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -742,7 +742,7 @@ lws_compare_and_swap_2:
74210: ldd 0(%r25), %r25 74210: ldd 0(%r25), %r25
74311: ldd 0(%r24), %r24 74311: ldd 0(%r24), %r24
744#else 744#else
745 /* Load new value into r22/r23 - high/low */ 745 /* Load old value into r22/r23 - high/low */
74610: ldw 0(%r25), %r22 74610: ldw 0(%r25), %r22
74711: ldw 4(%r25), %r23 74711: ldw 4(%r25), %r23
748 /* Load new value into fr4 for atomic store later */ 748 /* Load new value into fr4 for atomic store later */
@@ -834,11 +834,11 @@ cas2_action:
834 copy %r0, %r28 834 copy %r0, %r28
835#else 835#else
836 /* Compare first word */ 836 /* Compare first word */
83719: ldw,ma 0(%r26), %r29 83719: ldw 0(%r26), %r29
838 sub,= %r29, %r22, %r0 838 sub,= %r29, %r22, %r0
839 b,n cas2_end 839 b,n cas2_end
840 /* Compare second word */ 840 /* Compare second word */
84120: ldw,ma 4(%r26), %r29 84120: ldw 4(%r26), %r29
842 sub,= %r29, %r23, %r0 842 sub,= %r29, %r23, %r0
843 b,n cas2_end 843 b,n cas2_end
844 /* Perform the store */ 844 /* Perform the store */
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 2d956aa0a38a..8c0105a49839 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -253,7 +253,10 @@ static int __init init_cr16_clocksource(void)
253 cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; 253 cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
254 254
255 for_each_online_cpu(cpu) { 255 for_each_online_cpu(cpu) {
256 if (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc) 256 if (cpu == 0)
257 continue;
258 if ((cpu0_loc != 0) &&
259 (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
257 continue; 260 continue;
258 261
259 clocksource_cr16.name = "cr16_unstable"; 262 clocksource_cr16.name = "cr16_unstable";
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index afa46a7406ea..04e042edbab7 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -27,6 +27,7 @@ CONFIG_NET=y
27CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 27CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
28CONFIG_DEVTMPFS=y 28CONFIG_DEVTMPFS=y
29# CONFIG_FIRMWARE_IN_KERNEL is not set 29# CONFIG_FIRMWARE_IN_KERNEL is not set
30CONFIG_BLK_DEV_RAM=y
30# CONFIG_BLK_DEV_XPRAM is not set 31# CONFIG_BLK_DEV_XPRAM is not set
31# CONFIG_DCSSBLK is not set 32# CONFIG_DCSSBLK is not set
32# CONFIG_DASD is not set 33# CONFIG_DASD is not set
@@ -59,6 +60,7 @@ CONFIG_CONFIGFS_FS=y
59# CONFIG_NETWORK_FILESYSTEMS is not set 60# CONFIG_NETWORK_FILESYSTEMS is not set
60CONFIG_PRINTK_TIME=y 61CONFIG_PRINTK_TIME=y
61CONFIG_DEBUG_INFO=y 62CONFIG_DEBUG_INFO=y
63CONFIG_DEBUG_FS=y
62CONFIG_DEBUG_KERNEL=y 64CONFIG_DEBUG_KERNEL=y
63CONFIG_PANIC_ON_OOPS=y 65CONFIG_PANIC_ON_OOPS=y
64# CONFIG_SCHED_DEBUG is not set 66# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1cee6753d47a..495ff6959dec 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -293,7 +293,10 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
293 lc->lpp = LPP_MAGIC; 293 lc->lpp = LPP_MAGIC;
294 lc->current_pid = tsk->pid; 294 lc->current_pid = tsk->pid;
295 lc->user_timer = tsk->thread.user_timer; 295 lc->user_timer = tsk->thread.user_timer;
296 lc->guest_timer = tsk->thread.guest_timer;
296 lc->system_timer = tsk->thread.system_timer; 297 lc->system_timer = tsk->thread.system_timer;
298 lc->hardirq_timer = tsk->thread.hardirq_timer;
299 lc->softirq_timer = tsk->thread.softirq_timer;
297 lc->steal_timer = 0; 300 lc->steal_timer = 0;
298} 301}
299 302
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index c40a95c33bb8..322d25ae23ab 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -110,6 +110,10 @@ build_mmio_write(__writeq, "q", unsigned long, "r", )
110 110
111#endif 111#endif
112 112
113#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
114extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
115extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
116
113/** 117/**
114 * virt_to_phys - map virtual addresses to physical 118 * virt_to_phys - map virtual addresses to physical
115 * @address: address to remap 119 * @address: address to remap
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d362161d3291..c4aed0de565e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -82,12 +82,21 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
82#define __flush_tlb_single(addr) __native_flush_tlb_single(addr) 82#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
83#endif 83#endif
84 84
85/* 85static inline bool tlb_defer_switch_to_init_mm(void)
86 * If tlb_use_lazy_mode is true, then we try to avoid switching CR3 to point 86{
87 * to init_mm when we switch to a kernel thread (e.g. the idle thread). If 87 /*
88 * it's false, then we immediately switch CR3 when entering a kernel thread. 88 * If we have PCID, then switching to init_mm is reasonably
89 */ 89 * fast. If we don't have PCID, then switching to init_mm is
90DECLARE_STATIC_KEY_TRUE(tlb_use_lazy_mode); 90 * quite slow, so we try to defer it in the hopes that we can
91 * avoid it entirely. The latter approach runs the risk of
92 * receiving otherwise unnecessary IPIs.
93 *
94 * This choice is just a heuristic. The tlb code can handle this
95 * function returning true or false regardless of whether we have
96 * PCID.
97 */
98 return !static_cpu_has(X86_FEATURE_PCID);
99}
91 100
92/* 101/*
93 * 6 because 6 should be plenty and struct tlb_state will fit in 102 * 6 because 6 should be plenty and struct tlb_state will fit in
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 24f749324c0f..9990a71e311f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -831,7 +831,6 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
831 } else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { 831 } else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
832 unsigned int apicid, nshared, first, last; 832 unsigned int apicid, nshared, first, last;
833 833
834 this_leaf = this_cpu_ci->info_list + index;
835 nshared = base->eax.split.num_threads_sharing + 1; 834 nshared = base->eax.split.num_threads_sharing + 1;
836 apicid = cpu_data(cpu).apicid; 835 apicid = cpu_data(cpu).apicid;
837 first = apicid - (apicid % nshared); 836 first = apicid - (apicid % nshared);
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 8f7a9bbad514..7dbcb7adf797 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -34,6 +34,7 @@
34#include <linux/mm.h> 34#include <linux/mm.h>
35 35
36#include <asm/microcode_intel.h> 36#include <asm/microcode_intel.h>
37#include <asm/intel-family.h>
37#include <asm/processor.h> 38#include <asm/processor.h>
38#include <asm/tlbflush.h> 39#include <asm/tlbflush.h>
39#include <asm/setup.h> 40#include <asm/setup.h>
@@ -918,6 +919,18 @@ static int get_ucode_fw(void *to, const void *from, size_t n)
918 return 0; 919 return 0;
919} 920}
920 921
922static bool is_blacklisted(unsigned int cpu)
923{
924 struct cpuinfo_x86 *c = &cpu_data(cpu);
925
926 if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
927 pr_err_once("late loading on model 79 is disabled.\n");
928 return true;
929 }
930
931 return false;
932}
933
921static enum ucode_state request_microcode_fw(int cpu, struct device *device, 934static enum ucode_state request_microcode_fw(int cpu, struct device *device,
922 bool refresh_fw) 935 bool refresh_fw)
923{ 936{
@@ -926,6 +939,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
926 const struct firmware *firmware; 939 const struct firmware *firmware;
927 enum ucode_state ret; 940 enum ucode_state ret;
928 941
942 if (is_blacklisted(cpu))
943 return UCODE_NFOUND;
944
929 sprintf(name, "intel-ucode/%02x-%02x-%02x", 945 sprintf(name, "intel-ucode/%02x-%02x-%02x",
930 c->x86, c->x86_model, c->x86_mask); 946 c->x86, c->x86_model, c->x86_mask);
931 947
@@ -950,6 +966,9 @@ static int get_ucode_user(void *to, const void *from, size_t n)
950static enum ucode_state 966static enum ucode_state
951request_microcode_user(int cpu, const void __user *buf, size_t size) 967request_microcode_user(int cpu, const void __user *buf, size_t size)
952{ 968{
969 if (is_blacklisted(cpu))
970 return UCODE_NFOUND;
971
953 return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user); 972 return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
954} 973}
955 974
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index cf2ce063f65a..2902ca4d5993 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -30,10 +30,11 @@ static void __init i386_default_early_setup(void)
30 30
31asmlinkage __visible void __init i386_start_kernel(void) 31asmlinkage __visible void __init i386_start_kernel(void)
32{ 32{
33 cr4_init_shadow(); 33 /* Make sure IDT is set up before any exception happens */
34
35 idt_setup_early_handler(); 34 idt_setup_early_handler();
36 35
36 cr4_init_shadow();
37
37 sanitize_boot_params(&boot_params); 38 sanitize_boot_params(&boot_params);
38 39
39 x86_early_init_platform_quirks(); 40 x86_early_init_platform_quirks();
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index a99679826846..320c6237e1d1 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -174,3 +174,15 @@ const char *arch_vma_name(struct vm_area_struct *vma)
174 return "[mpx]"; 174 return "[mpx]";
175 return NULL; 175 return NULL;
176} 176}
177
178int valid_phys_addr_range(phys_addr_t addr, size_t count)
179{
180 return addr + count <= __pa(high_memory);
181}
182
183int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
184{
185 phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
186
187 return valid_phys_addr_range(addr, count);
188}
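These two hooks are what the /dev/mem driver consults before honouring reads, writes, or mmaps of physical memory; with them defined, x86 rejects ranges that run past the end of the direct map (high_memory). A hedged sketch of the caller side, loosely modelled on the /dev/mem read path and simplified (not the verbatim kernel code):

/* Sketch: how a /dev/mem style read path would use the new hook. */
static ssize_t phys_mem_read(phys_addr_t p, char __user *buf, size_t count)
{
	if (!valid_phys_addr_range(p, count))
		return -EFAULT;	/* range extends beyond high_memory */

	/* ... map the page(s) and copy_to_user() as before ... */
	return count;
}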
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 658bf0090565..0f3d0cea4d00 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -30,7 +30,6 @@
30 30
31atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); 31atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
32 32
33DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
34 33
35static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, 34static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
36 u16 *new_asid, bool *need_flush) 35 u16 *new_asid, bool *need_flush)
@@ -147,8 +146,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
147 this_cpu_write(cpu_tlbstate.is_lazy, false); 146 this_cpu_write(cpu_tlbstate.is_lazy, false);
148 147
149 if (real_prev == next) { 148 if (real_prev == next) {
150 VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != 149 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
151 next->context.ctx_id); 150 next->context.ctx_id);
152 151
153 /* 152 /*
154 * We don't currently support having a real mm loaded without 153 * We don't currently support having a real mm loaded without
@@ -213,6 +212,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
213} 212}
214 213
215/* 214/*
215 * Please ignore the name of this function. It should be called
216 * switch_to_kernel_thread().
217 *
216 * enter_lazy_tlb() is a hint from the scheduler that we are entering a 218 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
217 * kernel thread or other context without an mm. Acceptable implementations 219 * kernel thread or other context without an mm. Acceptable implementations
218 * include doing nothing whatsoever, switching to init_mm, or various clever 220 * include doing nothing whatsoever, switching to init_mm, or various clever
@@ -227,7 +229,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
227 if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm) 229 if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
228 return; 230 return;
229 231
230 if (static_branch_unlikely(&tlb_use_lazy_mode)) { 232 if (tlb_defer_switch_to_init_mm()) {
231 /* 233 /*
232 * There's a significant optimization that may be possible 234 * There's a significant optimization that may be possible
233 * here. We have accurate enough TLB flush tracking that we 235 * here. We have accurate enough TLB flush tracking that we
@@ -626,57 +628,3 @@ static int __init create_tlb_single_page_flush_ceiling(void)
626 return 0; 628 return 0;
627} 629}
628late_initcall(create_tlb_single_page_flush_ceiling); 630late_initcall(create_tlb_single_page_flush_ceiling);
629
630static ssize_t tlblazy_read_file(struct file *file, char __user *user_buf,
631 size_t count, loff_t *ppos)
632{
633 char buf[2];
634
635 buf[0] = static_branch_likely(&tlb_use_lazy_mode) ? '1' : '0';
636 buf[1] = '\n';
637
638 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
639}
640
641static ssize_t tlblazy_write_file(struct file *file,
642 const char __user *user_buf, size_t count, loff_t *ppos)
643{
644 bool val;
645
646 if (kstrtobool_from_user(user_buf, count, &val))
647 return -EINVAL;
648
649 if (val)
650 static_branch_enable(&tlb_use_lazy_mode);
651 else
652 static_branch_disable(&tlb_use_lazy_mode);
653
654 return count;
655}
656
657static const struct file_operations fops_tlblazy = {
658 .read = tlblazy_read_file,
659 .write = tlblazy_write_file,
660 .llseek = default_llseek,
661};
662
663static int __init init_tlb_use_lazy_mode(void)
664{
665 if (boot_cpu_has(X86_FEATURE_PCID)) {
666 /*
667 * Heuristic: with PCID on, switching to and from
668 * init_mm is reasonably fast, but remote flush IPIs
669 * as expensive as ever, so turn off lazy TLB mode.
670 *
671 * We can't do this in setup_pcid() because static keys
672 * haven't been initialized yet, and it would blow up
673 * badly.
674 */
675 static_branch_disable(&tlb_use_lazy_mode);
676 }
677
678 debugfs_create_file("tlb_use_lazy_mode", S_IRUSR | S_IWUSR,
679 arch_debugfs_dir, NULL, &fops_tlblazy);
680 return 0;
681}
682late_initcall(init_tlb_use_lazy_mode);
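The tlb_use_lazy_mode static key and its debugfs override are gone; the decision now comes from tlb_defer_switch_to_init_mm(), added to <asm/tlbflush.h> by the same series. A plausible shape for that helper, keeping the PCID heuristic that init_tlb_use_lazy_mode() used to encode — treat this as a sketch, not the verbatim header change:

/* Sketch: stay lazy (defer the switch to init_mm) only when PCID is absent. */
static inline bool tlb_defer_switch_to_init_mm(void)
{
	/*
	 * With PCID, switching to and from init_mm is reasonably cheap while
	 * remote flush IPIs stay expensive, so do the switch eagerly.
	 * Without PCID the switch itself is the expensive part, so defer it.
	 */
	return !static_cpu_has(X86_FEATURE_PCID);
}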
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index e4b0ed386bc8..39aecad286fe 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -57,6 +57,8 @@ struct key *find_asymmetric_key(struct key *keyring,
57 char *req, *p; 57 char *req, *p;
58 int len; 58 int len;
59 59
60 BUG_ON(!id_0 && !id_1);
61
60 if (id_0) { 62 if (id_0) {
61 lookup = id_0->data; 63 lookup = id_0->data;
62 len = id_0->len; 64 len = id_0->len;
@@ -105,7 +107,7 @@ struct key *find_asymmetric_key(struct key *keyring,
105 if (id_0 && id_1) { 107 if (id_0 && id_1) {
106 const struct asymmetric_key_ids *kids = asymmetric_key_ids(key); 108 const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
107 109
108 if (!kids->id[0]) { 110 if (!kids->id[1]) {
109 pr_debug("First ID matches, but second is missing\n"); 111 pr_debug("First ID matches, but second is missing\n");
110 goto reject; 112 goto reject;
111 } 113 }
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index af4cd8649117..d140d8bb2c96 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -88,6 +88,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg)
88 bool want = false; 88 bool want = false;
89 89
90 sinfo = msg->signed_infos; 90 sinfo = msg->signed_infos;
91 if (!sinfo)
92 goto inconsistent;
93
91 if (sinfo->authattrs) { 94 if (sinfo->authattrs) {
92 want = true; 95 want = true;
93 msg->have_authattrs = true; 96 msg->have_authattrs = true;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 883dfebd3014..baebbdfd74d5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
243 struct nbd_config *config = nbd->config; 243 struct nbd_config *config = nbd->config;
244 config->blksize = blocksize; 244 config->blksize = blocksize;
245 config->bytesize = blocksize * nr_blocks; 245 config->bytesize = blocksize * nr_blocks;
246 nbd_size_update(nbd);
247} 246}
248 247
249static void nbd_complete_rq(struct request *req) 248static void nbd_complete_rq(struct request *req)
@@ -1094,6 +1093,7 @@ static int nbd_start_device(struct nbd_device *nbd)
1094 args->index = i; 1093 args->index = i;
1095 queue_work(recv_workqueue, &args->work); 1094 queue_work(recv_workqueue, &args->work);
1096 } 1095 }
1096 nbd_size_update(nbd);
1097 return error; 1097 return error;
1098} 1098}
1099 1099
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 7cedb4295e9d..64d0fc17c174 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
2604 return NULL; 2604 return NULL;
2605 *dma_handle = dma_map_single(dev, buf, s->size, dir); 2605 *dma_handle = dma_map_single(dev, buf, s->size, dir);
2606 if (dma_mapping_error(dev, *dma_handle)) { 2606 if (dma_mapping_error(dev, *dma_handle)) {
2607 kfree(buf); 2607 kmem_cache_free(s, buf);
2608 buf = NULL; 2608 buf = NULL;
2609 } 2609 }
2610 return buf; 2610 return buf;
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c7f396903184..70db4d5638a6 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
720 if (mbus->hw_io_coherency) 720 if (mbus->hw_io_coherency)
721 w->mbus_attr |= ATTR_HW_COHERENCY; 721 w->mbus_attr |= ATTR_HW_COHERENCY;
722 w->base = base & DDR_BASE_CS_LOW_MASK; 722 w->base = base & DDR_BASE_CS_LOW_MASK;
723 w->size = (size | ~DDR_SIZE_MASK) + 1; 723 w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
724 } 724 }
725 } 725 }
726 mvebu_mbus_dram_info.num_cs = cs; 726 mvebu_mbus_dram_info.num_cs = cs;
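The (u64) cast matters when a DRAM chip-select window spans the full 4 GB: size | ~DDR_SIZE_MASK is then 0xffffffff, and the following "+ 1" wraps to zero if evaluated in 32 bits. A small host-side illustration with hypothetical register values (DDR_SIZE_MASK is assumed to be 0xffff0000 here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values: a chip-select window covering the full 4 GB. */
	uint32_t size = 0xffff0000;	/* contents of the SDRAM size register */
	uint32_t mask = 0xffff0000;	/* assumed value of DDR_SIZE_MASK */

	uint32_t wraps  = (size | ~mask) + 1;		/* wraps to 0 in 32 bits */
	uint64_t window = (uint64_t)(size | ~mask) + 1;	/* 0x100000000, i.e. 4 GB */

	printf("without cast: %u, with cast: 0x%llx\n",
	       wraps, (unsigned long long)window);
	return 0;
}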
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index a1df588343f2..1de8cac99a0e 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -117,7 +117,8 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
117 /* Turn off the clock (and clear the event) */ 117 /* Turn off the clock (and clear the event) */
118 disable_timer(cs5535_event_clock); 118 disable_timer(cs5535_event_clock);
119 119
120 if (clockevent_state_shutdown(&cs5535_clockevent)) 120 if (clockevent_state_detached(&cs5535_clockevent) ||
121 clockevent_state_shutdown(&cs5535_clockevent))
121 return IRQ_HANDLED; 122 return IRQ_HANDLED;
122 123
123 /* Clear the counter */ 124 /* Clear the counter */
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 339186f25a2a..55f9c62ee54b 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -344,7 +344,7 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
344 344
345 spin_lock_irqsave(&mdev->lock, irqflags); 345 spin_lock_irqsave(&mdev->lock, irqflags);
346 if (desc_cnt > mdev->desc_free_cnt) { 346 if (desc_cnt > mdev->desc_free_cnt) {
347 spin_unlock_bh(&mdev->lock); 347 spin_unlock_irqrestore(&mdev->lock, irqflags);
348 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); 348 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
349 return NULL; 349 return NULL;
350 } 350 }
@@ -407,7 +407,7 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
407 407
408 spin_lock_irqsave(&mdev->lock, irqflags); 408 spin_lock_irqsave(&mdev->lock, irqflags);
409 if (desc_cnt > mdev->desc_free_cnt) { 409 if (desc_cnt > mdev->desc_free_cnt) {
410 spin_unlock_bh(&mdev->lock); 410 spin_unlock_irqrestore(&mdev->lock, irqflags);
411 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); 411 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
412 return NULL; 412 return NULL;
413 } 413 }
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 97c94f9683fa..38cea6fb25a8 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
205 struct amd_sched_entity *entity) 205 struct amd_sched_entity *entity)
206{ 206{
207 struct amd_sched_rq *rq = entity->rq; 207 struct amd_sched_rq *rq = entity->rq;
208 int r;
209 208
210 if (!amd_sched_entity_is_initialized(sched, entity)) 209 if (!amd_sched_entity_is_initialized(sched, entity))
211 return; 210 return;
211
212 /** 212 /**
213 * The client will not queue more IBs during this fini, consume existing 213 * The client will not queue more IBs during this fini, consume existing
214 * queued IBs or discard them on SIGKILL 214 * queued IBs
215 */ 215 */
216 if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) 216 wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
217 r = -ERESTARTSYS;
218 else
219 r = wait_event_killable(sched->job_scheduled,
220 amd_sched_entity_is_idle(entity));
221 amd_sched_rq_remove_entity(rq, entity);
222 if (r) {
223 struct amd_sched_job *job;
224 217
225 /* Park the kernel for a moment to make sure it isn't processing 218 amd_sched_rq_remove_entity(rq, entity);
226 * our entity.
227 */
228 kthread_park(sched->thread);
229 kthread_unpark(sched->thread);
230 while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
231 sched->ops->free_job(job);
232
233 }
234 kfifo_free(&entity->job_queue); 219 kfifo_free(&entity->job_queue);
235} 220}
236 221
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index e651a58c18cf..82b72425a42f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = {
168static int exynos_drm_suspend(struct device *dev) 168static int exynos_drm_suspend(struct device *dev)
169{ 169{
170 struct drm_device *drm_dev = dev_get_drvdata(dev); 170 struct drm_device *drm_dev = dev_get_drvdata(dev);
171 struct exynos_drm_private *private = drm_dev->dev_private; 171 struct exynos_drm_private *private;
172 172
173 if (pm_runtime_suspended(dev) || !drm_dev) 173 if (pm_runtime_suspended(dev) || !drm_dev)
174 return 0; 174 return 0;
175 175
176 private = drm_dev->dev_private;
177
176 drm_kms_helper_poll_disable(drm_dev); 178 drm_kms_helper_poll_disable(drm_dev);
177 exynos_drm_fbdev_suspend(drm_dev); 179 exynos_drm_fbdev_suspend(drm_dev);
178 private->suspend_state = drm_atomic_helper_suspend(drm_dev); 180 private->suspend_state = drm_atomic_helper_suspend(drm_dev);
@@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev)
188static int exynos_drm_resume(struct device *dev) 190static int exynos_drm_resume(struct device *dev)
189{ 191{
190 struct drm_device *drm_dev = dev_get_drvdata(dev); 192 struct drm_device *drm_dev = dev_get_drvdata(dev);
191 struct exynos_drm_private *private = drm_dev->dev_private; 193 struct exynos_drm_private *private;
192 194
193 if (pm_runtime_suspended(dev) || !drm_dev) 195 if (pm_runtime_suspended(dev) || !drm_dev)
194 return 0; 196 return 0;
195 197
198 private = drm_dev->dev_private;
196 drm_atomic_helper_resume(drm_dev, private->suspend_state); 199 drm_atomic_helper_resume(drm_dev, private->suspend_state);
197 exynos_drm_fbdev_resume(drm_dev); 200 exynos_drm_fbdev_resume(drm_dev);
198 drm_kms_helper_poll_enable(drm_dev); 201 drm_kms_helper_poll_enable(drm_dev);
@@ -427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev)
427 430
428 kfree(drm->dev_private); 431 kfree(drm->dev_private);
429 drm->dev_private = NULL; 432 drm->dev_private = NULL;
433 dev_set_drvdata(dev, NULL);
430 434
431 drm_dev_unref(drm); 435 drm_dev_unref(drm);
432} 436}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 436377da41ba..03532dfc0cd5 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
308 308
309static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) 309static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
310{ 310{
311 struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
312 int ring_id;
313
314 kfree(vgpu->sched_data); 311 kfree(vgpu->sched_data);
315 vgpu->sched_data = NULL; 312 vgpu->sched_data = NULL;
316
317 spin_lock_bh(&scheduler->mmio_context_lock);
318 for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
319 if (scheduler->engine_owner[ring_id] == vgpu) {
320 intel_gvt_switch_mmio(vgpu, NULL, ring_id);
321 scheduler->engine_owner[ring_id] = NULL;
322 }
323 }
324 spin_unlock_bh(&scheduler->mmio_context_lock);
325} 313}
326 314
327static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) 315static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
388{ 376{
389 struct intel_gvt_workload_scheduler *scheduler = 377 struct intel_gvt_workload_scheduler *scheduler =
390 &vgpu->gvt->scheduler; 378 &vgpu->gvt->scheduler;
379 int ring_id;
391 380
392 gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); 381 gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
393 382
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
401 scheduler->need_reschedule = true; 390 scheduler->need_reschedule = true;
402 scheduler->current_vgpu = NULL; 391 scheduler->current_vgpu = NULL;
403 } 392 }
393
394 spin_lock_bh(&scheduler->mmio_context_lock);
395 for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
396 if (scheduler->engine_owner[ring_id] == vgpu) {
397 intel_gvt_switch_mmio(vgpu, NULL, ring_id);
398 scheduler->engine_owner[ring_id] = NULL;
399 }
400 }
401 spin_unlock_bh(&scheduler->mmio_context_lock);
404} 402}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index af289d35b77a..32e857dc507c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2657 if (READ_ONCE(obj->mm.pages)) 2657 if (READ_ONCE(obj->mm.pages))
2658 return -ENODEV; 2658 return -ENODEV;
2659 2659
2660 if (obj->mm.madv != I915_MADV_WILLNEED)
2661 return -EFAULT;
2662
2660 /* Before the pages are instantiated the object is treated as being 2663 /* Before the pages are instantiated the object is treated as being
2661 * in the CPU domain. The pages will be clflushed as required before 2664 * in the CPU domain. The pages will be clflushed as required before
2662 * use, and we can freely write into the pages directly. If userspace 2665 * use, and we can freely write into the pages directly. If userspace
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 4df039ef2ce3..e161d383b526 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,21 +33,20 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include "i915_trace.h" 34#include "i915_trace.h"
35 35
36static bool ggtt_is_idle(struct drm_i915_private *dev_priv) 36static bool ggtt_is_idle(struct drm_i915_private *i915)
37{ 37{
38 struct i915_ggtt *ggtt = &dev_priv->ggtt; 38 struct intel_engine_cs *engine;
39 struct intel_engine_cs *engine; 39 enum intel_engine_id id;
40 enum intel_engine_id id;
41 40
42 for_each_engine(engine, dev_priv, id) { 41 if (i915->gt.active_requests)
43 struct intel_timeline *tl; 42 return false;
44 43
45 tl = &ggtt->base.timeline.engine[engine->id]; 44 for_each_engine(engine, i915, id) {
46 if (i915_gem_active_isset(&tl->last_request)) 45 if (engine->last_retired_context != i915->kernel_context)
47 return false; 46 return false;
48 } 47 }
49 48
50 return true; 49 return true;
51} 50}
52 51
53static int ggtt_flush(struct drm_i915_private *i915) 52static int ggtt_flush(struct drm_i915_private *i915)
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
157 min_size, alignment, cache_level, 156 min_size, alignment, cache_level,
158 start, end, mode); 157 start, end, mode);
159 158
160 /* Retire before we search the active list. Although we have 159 /*
160 * Retire before we search the active list. Although we have
161 * reasonable accuracy in our retirement lists, we may have 161 * reasonable accuracy in our retirement lists, we may have
162 * a stray pin (preventing eviction) that can only be resolved by 162 * a stray pin (preventing eviction) that can only be resolved by
163 * retiring. 163 * retiring.
@@ -182,7 +182,8 @@ search_again:
182 BUG_ON(ret); 182 BUG_ON(ret);
183 } 183 }
184 184
185 /* Can we unpin some objects such as idle hw contents, 185 /*
186 * Can we unpin some objects such as idle hw contents,
186 * or pending flips? But since only the GGTT has global entries 187 * or pending flips? But since only the GGTT has global entries
187 * such as scanouts, rinbuffers and contexts, we can skip the 188 * such as scanouts, rinbuffers and contexts, we can skip the
188 * purge when inspecting per-process local address spaces. 189 * purge when inspecting per-process local address spaces.
@@ -190,19 +191,33 @@ search_again:
190 if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK) 191 if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
191 return -ENOSPC; 192 return -ENOSPC;
192 193
193 if (ggtt_is_idle(dev_priv)) { 194 /*
194 /* If we still have pending pageflip completions, drop 195 * Not everything in the GGTT is tracked via VMA using
195 * back to userspace to give our workqueues time to 196 * i915_vma_move_to_active(), otherwise we could evict as required
196 * acquire our locks and unpin the old scanouts. 197 * with minimal stalling. Instead we are forced to idle the GPU and
197 */ 198 * explicitly retire outstanding requests which will then remove
198 return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC; 199 * the pinning for active objects such as contexts and ring,
199 } 200 * enabling us to evict them on the next iteration.
201 *
202 * To ensure that all user contexts are evictable, we perform
203 * a switch to the perma-pinned kernel context. This all also gives
204 * us a termination condition, when the last retired context is
205 * the kernel's there is no more we can evict.
206 */
207 if (!ggtt_is_idle(dev_priv)) {
208 ret = ggtt_flush(dev_priv);
209 if (ret)
210 return ret;
200 211
201 ret = ggtt_flush(dev_priv); 212 goto search_again;
202 if (ret) 213 }
203 return ret;
204 214
205 goto search_again; 215 /*
216 * If we still have pending pageflip completions, drop
217 * back to userspace to give our workqueues time to
218 * acquire our locks and unpin the old scanouts.
219 */
220 return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
206 221
207found: 222found:
208 /* drm_mm doesn't allow any other other operations while 223 /* drm_mm doesn't allow any other other operations while
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ed7cd9ee2c2a..c9bcc6c45012 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6998,6 +6998,7 @@ enum {
6998 */ 6998 */
6999#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) 6999#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
7000#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) 7000#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
7001#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14))
7001 7002
7002#define GEN7_L3CNTLREG1 _MMIO(0xB01C) 7003#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
7003#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C 7004#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 476681d5940c..5e5fe03b638c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
664 int *n_entries) 664 int *n_entries)
665{ 665{
666 if (IS_BROADWELL(dev_priv)) { 666 if (IS_BROADWELL(dev_priv)) {
667 *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); 667 *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
668 return hsw_ddi_translations_fdi; 668 return bdw_ddi_translations_fdi;
669 } else if (IS_HASWELL(dev_priv)) { 669 } else if (IS_HASWELL(dev_priv)) {
670 *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); 670 *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
671 return hsw_ddi_translations_fdi; 671 return hsw_ddi_translations_fdi;
@@ -2102,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
2102 * register writes. 2102 * register writes.
2103 */ 2103 */
2104 val = I915_READ(DPCLKA_CFGCR0); 2104 val = I915_READ(DPCLKA_CFGCR0);
2105 val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) | 2105 val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
2106 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port));
2107 I915_WRITE(DPCLKA_CFGCR0, val); 2106 I915_WRITE(DPCLKA_CFGCR0, val);
2108 } else if (IS_GEN9_BC(dev_priv)) { 2107 } else if (IS_GEN9_BC(dev_priv)) {
2109 /* DDI -> PLL mapping */ 2108 /* DDI -> PLL mapping */
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index a2a3d93d67bd..df808a94c511 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1996 1996
1997 /* 3. Configure DPLL_CFGCR0 */ 1997 /* 3. Configure DPLL_CFGCR0 */
1998 /* Avoid touching CFGCR1 if HDMI mode is not enabled */ 1998 /* Avoid touching CFGCR1 if HDMI mode is not enabled */
1999 if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) { 1999 if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2000 val = pll->state.hw_state.cfgcr1; 2000 val = pll->state.hw_state.cfgcr1;
2001 I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val); 2001 I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
2002 /* 4. Read back to ensure writes completed */ 2002 /* 4. Read back to ensure writes completed */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 9ab596941372..3c2d9cf22ed5 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1048 } 1048 }
1049 1049
1050 /* WaProgramL3SqcReg1DefaultForPerf:bxt */ 1050 /* WaProgramL3SqcReg1DefaultForPerf:bxt */
1051 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) 1051 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
1052 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | 1052 u32 val = I915_READ(GEN8_L3SQCREG1);
1053 L3_HIGH_PRIO_CREDITS(2)); 1053 val &= ~L3_PRIO_CREDITS_MASK;
1054 val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
1055 I915_WRITE(GEN8_L3SQCREG1, val);
1056 }
1054 1057
1055 /* WaToEnableHwFixForPushConstHWBug:bxt */ 1058 /* WaToEnableHwFixForPushConstHWBug:bxt */
1056 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) 1059 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ed662937ec3c..0a09f8ff6aff 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
8245 int high_prio_credits) 8245 int high_prio_credits)
8246{ 8246{
8247 u32 misccpctl; 8247 u32 misccpctl;
8248 u32 val;
8248 8249
8249 /* WaTempDisableDOPClkGating:bdw */ 8250 /* WaTempDisableDOPClkGating:bdw */
8250 misccpctl = I915_READ(GEN7_MISCCPCTL); 8251 misccpctl = I915_READ(GEN7_MISCCPCTL);
8251 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 8252 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
8252 8253
8253 I915_WRITE(GEN8_L3SQCREG1, 8254 val = I915_READ(GEN8_L3SQCREG1);
8254 L3_GENERAL_PRIO_CREDITS(general_prio_credits) | 8255 val &= ~L3_PRIO_CREDITS_MASK;
8255 L3_HIGH_PRIO_CREDITS(high_prio_credits)); 8256 val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
8257 val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
8258 I915_WRITE(GEN8_L3SQCREG1, val);
8256 8259
8257 /* 8260 /*
8258 * Wait at least 100 clocks before re-enabling clock gating. 8261 * Wait at least 100 clocks before re-enabling clock gating.
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index f7707849bb53..2b12d82aac15 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -223,7 +223,7 @@ void
223nouveau_fbcon_accel_save_disable(struct drm_device *dev) 223nouveau_fbcon_accel_save_disable(struct drm_device *dev)
224{ 224{
225 struct nouveau_drm *drm = nouveau_drm(dev); 225 struct nouveau_drm *drm = nouveau_drm(dev);
226 if (drm->fbcon) { 226 if (drm->fbcon && drm->fbcon->helper.fbdev) {
227 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; 227 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
228 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; 228 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
229 } 229 }
@@ -233,7 +233,7 @@ void
233nouveau_fbcon_accel_restore(struct drm_device *dev) 233nouveau_fbcon_accel_restore(struct drm_device *dev)
234{ 234{
235 struct nouveau_drm *drm = nouveau_drm(dev); 235 struct nouveau_drm *drm = nouveau_drm(dev);
236 if (drm->fbcon) { 236 if (drm->fbcon && drm->fbcon->helper.fbdev) {
237 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; 237 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
238 } 238 }
239} 239}
@@ -245,7 +245,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
245 struct nouveau_fbdev *fbcon = drm->fbcon; 245 struct nouveau_fbdev *fbcon = drm->fbcon;
246 if (fbcon && drm->channel) { 246 if (fbcon && drm->channel) {
247 console_lock(); 247 console_lock();
248 fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; 248 if (fbcon->helper.fbdev)
249 fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
249 console_unlock(); 250 console_unlock();
250 nouveau_channel_idle(drm->channel); 251 nouveau_channel_idle(drm->channel);
251 nvif_object_fini(&fbcon->twod); 252 nvif_object_fini(&fbcon->twod);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2dbf62a2ac41..e4751f92b342 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3265,11 +3265,14 @@ nv50_mstm = {
3265void 3265void
3266nv50_mstm_service(struct nv50_mstm *mstm) 3266nv50_mstm_service(struct nv50_mstm *mstm)
3267{ 3267{
3268 struct drm_dp_aux *aux = mstm->mgr.aux; 3268 struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
3269 bool handled = true; 3269 bool handled = true;
3270 int ret; 3270 int ret;
3271 u8 esi[8] = {}; 3271 u8 esi[8] = {};
3272 3272
3273 if (!aux)
3274 return;
3275
3273 while (handled) { 3276 while (handled) {
3274 ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8); 3277 ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
3275 if (ret != 8) { 3278 if (ret != 8) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 8e2e24a74774..44e116f7880d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -39,5 +39,5 @@ int
39g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine) 39g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
40{ 40{
41 return nvkm_xtensa_new_(&g84_bsp, device, index, 41 return nvkm_xtensa_new_(&g84_bsp, device, index,
42 true, 0x103000, pengine); 42 device->chipset != 0x92, 0x103000, pengine);
43} 43}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index d06ad2c372bf..455da298227f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
241 mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem); 241 mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
242 } 242 }
243 243
244 mmu->func->flush(vm);
245
244 nvkm_memory_del(&pgt); 246 nvkm_memory_del(&pgt);
245 } 247 }
246} 248}
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 54a47b40546f..f96830ffd9f1 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1021,7 +1021,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
1021 } 1021 }
1022 1022
1023 dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n", 1023 dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n",
1024 rinfo->sda_gpio, rinfo->scl_gpio); 1024 rinfo->scl_gpio, rinfo->sda_gpio);
1025 1025
1026 rinfo->prepare_recovery = i2c_imx_prepare_recovery; 1026 rinfo->prepare_recovery = i2c_imx_prepare_recovery;
1027 rinfo->unprepare_recovery = i2c_imx_unprepare_recovery; 1027 rinfo->unprepare_recovery = i2c_imx_unprepare_recovery;
@@ -1100,7 +1100,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
1100 } 1100 }
1101 1101
1102 /* Request IRQ */ 1102 /* Request IRQ */
1103 ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0, 1103 ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED,
1104 pdev->name, i2c_imx); 1104 pdev->name, i2c_imx);
1105 if (ret) { 1105 if (ret) {
1106 dev_err(&pdev->dev, "can't claim irq %d\n", irq); 1106 dev_err(&pdev->dev, "can't claim irq %d\n", irq);
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 22ffcb73c185..b51adffa4841 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -340,12 +340,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
340 data->word = dma_buffer[0] | (dma_buffer[1] << 8); 340 data->word = dma_buffer[0] | (dma_buffer[1] << 8);
341 break; 341 break;
342 case I2C_SMBUS_BLOCK_DATA: 342 case I2C_SMBUS_BLOCK_DATA:
343 case I2C_SMBUS_I2C_BLOCK_DATA:
344 if (desc->rxbytes != dma_buffer[0] + 1) 343 if (desc->rxbytes != dma_buffer[0] + 1)
345 return -EMSGSIZE; 344 return -EMSGSIZE;
346 345
347 memcpy(data->block, dma_buffer, desc->rxbytes); 346 memcpy(data->block, dma_buffer, desc->rxbytes);
348 break; 347 break;
348 case I2C_SMBUS_I2C_BLOCK_DATA:
349 memcpy(&data->block[1], dma_buffer, desc->rxbytes);
350 data->block[0] = desc->rxbytes;
351 break;
349 } 352 }
350 return 0; 353 return 0;
351 } 354 }
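The split above reflects the two buffer conventions of the SMBus core: for a BLOCK_DATA read the device puts the byte count on the wire first, so the DMA buffer already matches block[0..len]; for an I2C_BLOCK_DATA read there is no count byte, so the driver has to store the length in block[0] itself. An illustrative sketch of the assumed layouts (simplified, not the driver code):

static void fill_smbus_block(u8 *block, const u8 *dma, u8 rxbytes)
{
	memcpy(block, dma, rxbytes);		/* dma[0] already is the length byte */
}

static void fill_i2c_block(u8 *block, const u8 *dma, u8 rxbytes)
{
	memcpy(&block[1], dma, rxbytes);	/* payload only on the wire */
	block[0] = rxbytes;			/* synthesize the length */
}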
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 1ebb5e947e0b..23c2ea2baedc 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -360,6 +360,7 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
360 unsigned long fclk_rate = 12000000; 360 unsigned long fclk_rate = 12000000;
361 unsigned long internal_clk = 0; 361 unsigned long internal_clk = 0;
362 struct clk *fclk; 362 struct clk *fclk;
363 int error;
363 364
364 if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) { 365 if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) {
365 /* 366 /*
@@ -378,6 +379,13 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
378 * do this bit unconditionally. 379 * do this bit unconditionally.
379 */ 380 */
380 fclk = clk_get(omap->dev, "fck"); 381 fclk = clk_get(omap->dev, "fck");
382 if (IS_ERR(fclk)) {
383 error = PTR_ERR(fclk);
384 dev_err(omap->dev, "could not get fck: %i\n", error);
385
386 return error;
387 }
388
381 fclk_rate = clk_get_rate(fclk); 389 fclk_rate = clk_get_rate(fclk);
382 clk_put(fclk); 390 clk_put(fclk);
383 391
@@ -410,6 +418,12 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
410 else 418 else
411 internal_clk = 4000; 419 internal_clk = 4000;
412 fclk = clk_get(omap->dev, "fck"); 420 fclk = clk_get(omap->dev, "fck");
421 if (IS_ERR(fclk)) {
422 error = PTR_ERR(fclk);
423 dev_err(omap->dev, "could not get fck: %i\n", error);
424
425 return error;
426 }
413 fclk_rate = clk_get_rate(fclk) / 1000; 427 fclk_rate = clk_get_rate(fclk) / 1000;
414 clk_put(fclk); 428 clk_put(fclk);
415 429
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 0ecdb47a23ab..174579d32e5f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -85,6 +85,9 @@
85/* SB800 constants */ 85/* SB800 constants */
86#define SB800_PIIX4_SMB_IDX 0xcd6 86#define SB800_PIIX4_SMB_IDX 0xcd6
87 87
88#define KERNCZ_IMC_IDX 0x3e
89#define KERNCZ_IMC_DATA 0x3f
90
88/* 91/*
89 * SB800 port is selected by bits 2:1 of the smb_en register (0x2c) 92 * SB800 port is selected by bits 2:1 of the smb_en register (0x2c)
90 * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f. 93 * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f.
@@ -94,6 +97,12 @@
94#define SB800_PIIX4_PORT_IDX_ALT 0x2e 97#define SB800_PIIX4_PORT_IDX_ALT 0x2e
95#define SB800_PIIX4_PORT_IDX_SEL 0x2f 98#define SB800_PIIX4_PORT_IDX_SEL 0x2f
96#define SB800_PIIX4_PORT_IDX_MASK 0x06 99#define SB800_PIIX4_PORT_IDX_MASK 0x06
100#define SB800_PIIX4_PORT_IDX_SHIFT 1
101
102/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
103#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
104#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
105#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
97 106
98/* insmod parameters */ 107/* insmod parameters */
99 108
@@ -149,6 +158,8 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
149 */ 158 */
150static DEFINE_MUTEX(piix4_mutex_sb800); 159static DEFINE_MUTEX(piix4_mutex_sb800);
151static u8 piix4_port_sel_sb800; 160static u8 piix4_port_sel_sb800;
161static u8 piix4_port_mask_sb800;
162static u8 piix4_port_shift_sb800;
152static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = { 163static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
153 " port 0", " port 2", " port 3", " port 4" 164 " port 0", " port 2", " port 3", " port 4"
154}; 165};
@@ -159,6 +170,7 @@ struct i2c_piix4_adapdata {
159 170
160 /* SB800 */ 171 /* SB800 */
161 bool sb800_main; 172 bool sb800_main;
173 bool notify_imc;
162 u8 port; /* Port number, shifted */ 174 u8 port; /* Port number, shifted */
163}; 175};
164 176
@@ -347,7 +359,19 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
347 359
348 /* Find which register is used for port selection */ 360 /* Find which register is used for port selection */
349 if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) { 361 if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
350 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; 362 switch (PIIX4_dev->device) {
363 case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
364 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
365 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
366 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
367 break;
368 case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
369 default:
370 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
371 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
372 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
373 break;
374 }
351 } else { 375 } else {
352 mutex_lock(&piix4_mutex_sb800); 376 mutex_lock(&piix4_mutex_sb800);
353 outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX); 377 outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
@@ -355,6 +379,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
355 piix4_port_sel_sb800 = (port_sel & 0x01) ? 379 piix4_port_sel_sb800 = (port_sel & 0x01) ?
356 SB800_PIIX4_PORT_IDX_ALT : 380 SB800_PIIX4_PORT_IDX_ALT :
357 SB800_PIIX4_PORT_IDX; 381 SB800_PIIX4_PORT_IDX;
382 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
383 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
358 mutex_unlock(&piix4_mutex_sb800); 384 mutex_unlock(&piix4_mutex_sb800);
359 } 385 }
360 386
@@ -572,6 +598,67 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
572 return 0; 598 return 0;
573} 599}
574 600
601static uint8_t piix4_imc_read(uint8_t idx)
602{
603 outb_p(idx, KERNCZ_IMC_IDX);
604 return inb_p(KERNCZ_IMC_DATA);
605}
606
607static void piix4_imc_write(uint8_t idx, uint8_t value)
608{
609 outb_p(idx, KERNCZ_IMC_IDX);
610 outb_p(value, KERNCZ_IMC_DATA);
611}
612
613static int piix4_imc_sleep(void)
614{
615 int timeout = MAX_TIMEOUT;
616
617 if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
618 return -EBUSY;
619
620 /* clear response register */
621 piix4_imc_write(0x82, 0x00);
622 /* request ownership flag */
623 piix4_imc_write(0x83, 0xB4);
624 /* kick off IMC Mailbox command 96 */
625 piix4_imc_write(0x80, 0x96);
626
627 while (timeout--) {
628 if (piix4_imc_read(0x82) == 0xfa) {
629 release_region(KERNCZ_IMC_IDX, 2);
630 return 0;
631 }
632 usleep_range(1000, 2000);
633 }
634
635 release_region(KERNCZ_IMC_IDX, 2);
636 return -ETIMEDOUT;
637}
638
639static void piix4_imc_wakeup(void)
640{
641 int timeout = MAX_TIMEOUT;
642
643 if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
644 return;
645
646 /* clear response register */
647 piix4_imc_write(0x82, 0x00);
648 /* release ownership flag */
649 piix4_imc_write(0x83, 0xB5);
650 /* kick off IMC Mailbox command 96 */
651 piix4_imc_write(0x80, 0x96);
652
653 while (timeout--) {
654 if (piix4_imc_read(0x82) == 0xfa)
655 break;
656 usleep_range(1000, 2000);
657 }
658
659 release_region(KERNCZ_IMC_IDX, 2);
660}
661
575/* 662/*
576 * Handles access to multiple SMBus ports on the SB800. 663 * Handles access to multiple SMBus ports on the SB800.
577 * The port is selected by bits 2:1 of the smb_en register (0x2c). 664 * The port is selected by bits 2:1 of the smb_en register (0x2c).
@@ -612,12 +699,47 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
612 return -EBUSY; 699 return -EBUSY;
613 } 700 }
614 701
702 /*
703 * Notify the IMC (Integrated Micro Controller) if required.
704 * Among other responsibilities, the IMC is in charge of monitoring
705 * the system fans and temperature sensors, and acting accordingly.
706 * All this is done through SMBus and can/will collide
707 * with our transactions if they are long (BLOCK_DATA).
708 * Therefore we need to request the ownership flag during those
709 * transactions.
710 */
711 if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) {
712 int ret;
713
714 ret = piix4_imc_sleep();
715 switch (ret) {
716 case -EBUSY:
717 dev_warn(&adap->dev,
718 "IMC base address index region 0x%x already in use.\n",
719 KERNCZ_IMC_IDX);
720 break;
721 case -ETIMEDOUT:
722 dev_warn(&adap->dev,
723 "Failed to communicate with the IMC.\n");
724 break;
725 default:
726 break;
727 }
728
729 /* If IMC communication fails do not retry */
730 if (ret) {
731 dev_warn(&adap->dev,
732 "Continuing without IMC notification.\n");
733 adapdata->notify_imc = false;
734 }
735 }
736
615 outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); 737 outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
616 smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); 738 smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
617 739
618 port = adapdata->port; 740 port = adapdata->port;
619 if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port) 741 if ((smba_en_lo & piix4_port_mask_sb800) != port)
620 outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port, 742 outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
621 SB800_PIIX4_SMB_IDX + 1); 743 SB800_PIIX4_SMB_IDX + 1);
622 744
623 retval = piix4_access(adap, addr, flags, read_write, 745 retval = piix4_access(adap, addr, flags, read_write,
@@ -628,6 +750,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
628 /* Release the semaphore */ 750 /* Release the semaphore */
629 outb_p(smbslvcnt | 0x20, SMBSLVCNT); 751 outb_p(smbslvcnt | 0x20, SMBSLVCNT);
630 752
753 if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc)
754 piix4_imc_wakeup();
755
631 mutex_unlock(&piix4_mutex_sb800); 756 mutex_unlock(&piix4_mutex_sb800);
632 757
633 return retval; 758 return retval;
@@ -679,7 +804,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
679static struct i2c_adapter *piix4_aux_adapter; 804static struct i2c_adapter *piix4_aux_adapter;
680 805
681static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, 806static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
682 bool sb800_main, u8 port, 807 bool sb800_main, u8 port, bool notify_imc,
683 const char *name, struct i2c_adapter **padap) 808 const char *name, struct i2c_adapter **padap)
684{ 809{
685 struct i2c_adapter *adap; 810 struct i2c_adapter *adap;
@@ -706,7 +831,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
706 831
707 adapdata->smba = smba; 832 adapdata->smba = smba;
708 adapdata->sb800_main = sb800_main; 833 adapdata->sb800_main = sb800_main;
709 adapdata->port = port << 1; 834 adapdata->port = port << piix4_port_shift_sb800;
835 adapdata->notify_imc = notify_imc;
710 836
711 /* set up the sysfs linkage to our parent device */ 837 /* set up the sysfs linkage to our parent device */
712 adap->dev.parent = &dev->dev; 838 adap->dev.parent = &dev->dev;
@@ -728,14 +854,15 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
728 return 0; 854 return 0;
729} 855}
730 856
731static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba) 857static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba,
858 bool notify_imc)
732{ 859{
733 struct i2c_piix4_adapdata *adapdata; 860 struct i2c_piix4_adapdata *adapdata;
734 int port; 861 int port;
735 int retval; 862 int retval;
736 863
737 for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) { 864 for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) {
738 retval = piix4_add_adapter(dev, smba, true, port, 865 retval = piix4_add_adapter(dev, smba, true, port, notify_imc,
739 piix4_main_port_names_sb800[port], 866 piix4_main_port_names_sb800[port],
740 &piix4_main_adapters[port]); 867 &piix4_main_adapters[port]);
741 if (retval < 0) 868 if (retval < 0)
@@ -769,6 +896,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
769 dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && 896 dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
770 dev->revision >= 0x40) || 897 dev->revision >= 0x40) ||
771 dev->vendor == PCI_VENDOR_ID_AMD) { 898 dev->vendor == PCI_VENDOR_ID_AMD) {
899 bool notify_imc = false;
772 is_sb800 = true; 900 is_sb800 = true;
773 901
774 if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) { 902 if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) {
@@ -778,6 +906,20 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
778 return -EBUSY; 906 return -EBUSY;
779 } 907 }
780 908
909 if (dev->vendor == PCI_VENDOR_ID_AMD &&
910 dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) {
911 u8 imc;
912
913 /*
914 * Detect whether the IMC is active; this method is
915 * described in coreboot's AMD IMC notes
916 */
917 pci_bus_read_config_byte(dev->bus, PCI_DEVFN(0x14, 3),
918 0x40, &imc);
919 if (imc & 0x80)
920 notify_imc = true;
921 }
922
781 /* base address location etc changed in SB800 */ 923 /* base address location etc changed in SB800 */
782 retval = piix4_setup_sb800(dev, id, 0); 924 retval = piix4_setup_sb800(dev, id, 0);
783 if (retval < 0) { 925 if (retval < 0) {
@@ -789,7 +931,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
789 * Try to register multiplexed main SMBus adapter, 931 * Try to register multiplexed main SMBus adapter,
790 * give up if we can't 932 * give up if we can't
791 */ 933 */
792 retval = piix4_add_adapters_sb800(dev, retval); 934 retval = piix4_add_adapters_sb800(dev, retval, notify_imc);
793 if (retval < 0) { 935 if (retval < 0) {
794 release_region(SB800_PIIX4_SMB_IDX, 2); 936 release_region(SB800_PIIX4_SMB_IDX, 2);
795 return retval; 937 return retval;
@@ -800,7 +942,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
800 return retval; 942 return retval;
801 943
802 /* Try to register main SMBus adapter, give up if we can't */ 944 /* Try to register main SMBus adapter, give up if we can't */
803 retval = piix4_add_adapter(dev, retval, false, 0, "", 945 retval = piix4_add_adapter(dev, retval, false, 0, false, "",
804 &piix4_main_adapters[0]); 946 &piix4_main_adapters[0]);
805 if (retval < 0) 947 if (retval < 0)
806 return retval; 948 return retval;
@@ -827,7 +969,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
827 if (retval > 0) { 969 if (retval > 0) {
828 /* Try to add the aux adapter if it exists, 970 /* Try to add the aux adapter if it exists,
829 * piix4_add_adapter will clean up if this fails */ 971 * piix4_add_adapter will clean up if this fails */
830 piix4_add_adapter(dev, retval, false, 0, 972 piix4_add_adapter(dev, retval, false, 0, false,
831 is_sb800 ? piix4_aux_port_name_sb800 : "", 973 is_sb800 ? piix4_aux_port_name_sb800 : "",
832 &piix4_aux_adapter); 974 &piix4_aux_adapter);
833 } 975 }
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d268fdc23c64..762bfb9487dc 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -933,58 +933,52 @@ int input_set_keycode(struct input_dev *dev,
933} 933}
934EXPORT_SYMBOL(input_set_keycode); 934EXPORT_SYMBOL(input_set_keycode);
935 935
936bool input_match_device_id(const struct input_dev *dev,
937 const struct input_device_id *id)
938{
939 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
940 if (id->bustype != dev->id.bustype)
941 return false;
942
943 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
944 if (id->vendor != dev->id.vendor)
945 return false;
946
947 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
948 if (id->product != dev->id.product)
949 return false;
950
951 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
952 if (id->version != dev->id.version)
953 return false;
954
955 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
956 !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
957 !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
958 !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
959 !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
960 !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
961 !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
962 !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
963 !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
964 !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
965 return false;
966 }
967
968 return true;
969}
970EXPORT_SYMBOL(input_match_device_id);
971
936static const struct input_device_id *input_match_device(struct input_handler *handler, 972static const struct input_device_id *input_match_device(struct input_handler *handler,
937 struct input_dev *dev) 973 struct input_dev *dev)
938{ 974{
939 const struct input_device_id *id; 975 const struct input_device_id *id;
940 976
941 for (id = handler->id_table; id->flags || id->driver_info; id++) { 977 for (id = handler->id_table; id->flags || id->driver_info; id++) {
942 978 if (input_match_device_id(dev, id) &&
943 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) 979 (!handler->match || handler->match(handler, dev))) {
944 if (id->bustype != dev->id.bustype)
945 continue;
946
947 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
948 if (id->vendor != dev->id.vendor)
949 continue;
950
951 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
952 if (id->product != dev->id.product)
953 continue;
954
955 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
956 if (id->version != dev->id.version)
957 continue;
958
959 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX))
960 continue;
961
962 if (!bitmap_subset(id->keybit, dev->keybit, KEY_MAX))
963 continue;
964
965 if (!bitmap_subset(id->relbit, dev->relbit, REL_MAX))
966 continue;
967
968 if (!bitmap_subset(id->absbit, dev->absbit, ABS_MAX))
969 continue;
970
971 if (!bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX))
972 continue;
973
974 if (!bitmap_subset(id->ledbit, dev->ledbit, LED_MAX))
975 continue;
976
977 if (!bitmap_subset(id->sndbit, dev->sndbit, SND_MAX))
978 continue;
979
980 if (!bitmap_subset(id->ffbit, dev->ffbit, FF_MAX))
981 continue;
982
983 if (!bitmap_subset(id->swbit, dev->swbit, SW_MAX))
984 continue;
985
986 if (!handler->match || handler->match(handler, dev))
987 return id; 980 return id;
981 }
988 } 982 }
989 983
990 return NULL; 984 return NULL;
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 29d677c714d2..7b29a8944039 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -747,6 +747,68 @@ static void joydev_cleanup(struct joydev *joydev)
747 input_close_device(handle); 747 input_close_device(handle);
748} 748}
749 749
750/*
751 * These codes are copied from hid-ids.h; unfortunately there is no common
752 * usb_ids/bt_ids.h header.
753 */
754#define USB_VENDOR_ID_SONY 0x054c
755#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
756#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
757#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
758#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
759
760#define USB_VENDOR_ID_THQ 0x20d6
761#define USB_DEVICE_ID_THQ_PS3_UDRAW 0xcb17
762
763#define ACCEL_DEV(vnd, prd) \
764 { \
765 .flags = INPUT_DEVICE_ID_MATCH_VENDOR | \
766 INPUT_DEVICE_ID_MATCH_PRODUCT | \
767 INPUT_DEVICE_ID_MATCH_PROPBIT, \
768 .vendor = (vnd), \
769 .product = (prd), \
770 .propbit = { BIT_MASK(INPUT_PROP_ACCELEROMETER) }, \
771 }
772
773static const struct input_device_id joydev_blacklist[] = {
774 /* Avoid touchpads and touchscreens */
775 {
776 .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
777 INPUT_DEVICE_ID_MATCH_KEYBIT,
778 .evbit = { BIT_MASK(EV_KEY) },
779 .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
780 },
781 /* Avoid tablets, digitisers and similar devices */
782 {
783 .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
784 INPUT_DEVICE_ID_MATCH_KEYBIT,
785 .evbit = { BIT_MASK(EV_KEY) },
786 .keybit = { [BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_DIGI) },
787 },
788 /* Disable accelerometers on composite devices */
789 ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER),
790 ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
791 ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
792 ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
793 ACCEL_DEV(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW),
794 { /* sentinel */ }
795};
796
797static bool joydev_dev_is_blacklisted(struct input_dev *dev)
798{
799 const struct input_device_id *id;
800
801 for (id = joydev_blacklist; id->flags; id++) {
802 if (input_match_device_id(dev, id)) {
803 dev_dbg(&dev->dev,
804 "joydev: blacklisting '%s'\n", dev->name);
805 return true;
806 }
807 }
808
809 return false;
810}
811
750static bool joydev_dev_is_absolute_mouse(struct input_dev *dev) 812static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
751{ 813{
752 DECLARE_BITMAP(jd_scratch, KEY_CNT); 814 DECLARE_BITMAP(jd_scratch, KEY_CNT);
@@ -807,12 +869,8 @@ static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
807 869
808static bool joydev_match(struct input_handler *handler, struct input_dev *dev) 870static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
809{ 871{
810 /* Avoid touchpads and touchscreens */ 872 /* Disable blacklisted devices */
811 if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_TOUCH, dev->keybit)) 873 if (joydev_dev_is_blacklisted(dev))
812 return false;
813
814 /* Avoid tablets, digitisers and similar devices */
815 if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit))
816 return false; 874 return false;
817 875
818 /* Avoid absolute mice */ 876 /* Avoid absolute mice */
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index e37e335e406f..6da607d3b811 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -234,14 +234,7 @@ static irqreturn_t tca8418_irq_handler(int irq, void *dev_id)
234static int tca8418_configure(struct tca8418_keypad *keypad_data, 234static int tca8418_configure(struct tca8418_keypad *keypad_data,
235 u32 rows, u32 cols) 235 u32 rows, u32 cols)
236{ 236{
237 int reg, error; 237 int reg, error = 0;
238
239 /* Write config register, if this fails assume device not present */
240 error = tca8418_write_byte(keypad_data, REG_CFG,
241 CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
242 if (error < 0)
243 return -ENODEV;
244
245 238
246 /* Assemble a mask for row and column registers */ 239 /* Assemble a mask for row and column registers */
247 reg = ~(~0 << rows); 240 reg = ~(~0 << rows);
@@ -257,6 +250,12 @@ static int tca8418_configure(struct tca8418_keypad *keypad_data,
257 error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8); 250 error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8);
258 error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16); 251 error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16);
259 252
253 if (error)
254 return error;
255
256 error = tca8418_write_byte(keypad_data, REG_CFG,
257 CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
258
260 return error; 259 return error;
261} 260}
262 261
@@ -268,6 +267,7 @@ static int tca8418_keypad_probe(struct i2c_client *client,
268 struct input_dev *input; 267 struct input_dev *input;
269 u32 rows = 0, cols = 0; 268 u32 rows = 0, cols = 0;
270 int error, row_shift, max_keys; 269 int error, row_shift, max_keys;
270 u8 reg;
271 271
272 /* Check i2c driver capabilities */ 272 /* Check i2c driver capabilities */
273 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) { 273 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
@@ -301,10 +301,10 @@ static int tca8418_keypad_probe(struct i2c_client *client,
301 keypad_data->client = client; 301 keypad_data->client = client;
302 keypad_data->row_shift = row_shift; 302 keypad_data->row_shift = row_shift;
303 303
304 /* Initialize the chip or fail if chip isn't present */ 304 /* Read key lock register, if this fails assume device not present */
305 error = tca8418_configure(keypad_data, rows, cols); 305 error = tca8418_read_byte(keypad_data, REG_KEY_LCK_EC, &reg);
306 if (error < 0) 306 if (error)
307 return error; 307 return -ENODEV;
308 308
309 /* Configure input device */ 309 /* Configure input device */
310 input = devm_input_allocate_device(dev); 310 input = devm_input_allocate_device(dev);
@@ -340,6 +340,11 @@ static int tca8418_keypad_probe(struct i2c_client *client,
340 return error; 340 return error;
341 } 341 }
342 342
343 /* Initialize the chip */
344 error = tca8418_configure(keypad_data, rows, cols);
345 if (error < 0)
346 return error;
347
343 error = input_register_device(input); 348 error = input_register_device(input);
344 if (error) { 349 if (error) {
345 dev_err(dev, "Unable to register input device, error: %d\n", 350 dev_err(dev, "Unable to register input device, error: %d\n",
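The tca8418 reorder above boils down to: detect the chip with one cheap register read early in probe, and defer the full configuration write sequence until the rest of probe has succeeded. Below is a minimal user-space sketch of that control flow; the register constant and the bus helpers are stand-ins for the driver's I2C accessors, not the real API.

    #include <stdio.h>

    /* Stand-in register constant; the real driver reads REG_KEY_LCK_EC. */
    #define REG_KEY_LCK_EC 0x30

    /* Hypothetical bus helpers standing in for tca8418_read_byte()/_write_byte(). */
    static int bus_read(unsigned reg, unsigned char *val) { *val = 0; return 0; }
    static int bus_write(unsigned reg, unsigned char val) { return 0; }

    static int chip_configure(void)
    {
            /* Full configuration: row/column masks, debounce, interrupts... */
            return bus_write(0x01, 0xff);
    }

    static int chip_probe(void)
    {
            unsigned char reg;

            /* Cheap presence check first: one read that any live chip answers. */
            if (bus_read(REG_KEY_LCK_EC, &reg))
                    return -1;              /* assume device not present */

            /* ...allocate and set up the input device here... */

            /* Only now program the chip; a failure here is a real error. */
            return chip_configure();
    }

    int main(void)
    {
            printf("probe: %d\n", chip_probe());
            return 0;
    }

The point of the split is that a read failure early on means "no device", while a configuration failure later on is a genuine error worth reporting.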
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 6cee5adc3b5c..debeeaeb8812 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -403,6 +403,7 @@ static const struct platform_device_id axp_pek_id_match[] = {
403 }, 403 },
404 { /* sentinel */ } 404 { /* sentinel */ }
405}; 405};
406MODULE_DEVICE_TABLE(platform, axp_pek_id_match);
406 407
407static struct platform_driver axp20x_pek_driver = { 408static struct platform_driver axp20x_pek_driver = {
408 .probe = axp20x_pek_probe, 409 .probe = axp20x_pek_probe,
@@ -417,4 +418,3 @@ module_platform_driver(axp20x_pek_driver);
417MODULE_DESCRIPTION("axp20x Power Button"); 418MODULE_DESCRIPTION("axp20x Power Button");
418MODULE_AUTHOR("Carlo Caione <carlo@caione.org>"); 419MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
419MODULE_LICENSE("GPL"); 420MODULE_LICENSE("GPL");
420MODULE_ALIAS("platform:axp20x-pek");
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 6bf82ea8c918..ae473123583b 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
1635 return NULL; 1635 return NULL;
1636 } 1636 }
1637 1637
1638 while (buflen > 0) { 1638 while (buflen >= sizeof(*union_desc)) {
1639 union_desc = (struct usb_cdc_union_desc *)buf; 1639 union_desc = (struct usb_cdc_union_desc *)buf;
1640 1640
1641 if (union_desc->bLength > buflen) {
1642 dev_err(&intf->dev, "Too large descriptor\n");
1643 return NULL;
1644 }
1645
1641 if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE && 1646 if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
1642 union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) { 1647 union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
1643 dev_dbg(&intf->dev, "Found union header\n"); 1648 dev_dbg(&intf->dev, "Found union header\n");
1644 return union_desc; 1649
1650 if (union_desc->bLength >= sizeof(*union_desc))
1651 return union_desc;
1652
1653 dev_err(&intf->dev,
1654 "Union descriptor to short (%d vs %zd\n)",
1655 union_desc->bLength, sizeof(*union_desc));
1656 return NULL;
1645 } 1657 }
1646 1658
1647 buflen -= union_desc->bLength; 1659 buflen -= union_desc->bLength;
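The descriptor-walk hardening above keeps every dereference inside the buffer: the loop only continues while a full descriptor header still fits, a descriptor that claims more bytes than remain aborts the search, and a match is only returned when it is large enough to be a union descriptor. A simplified, self-contained sketch of that bounded walk (the two-byte header and the WANTED_TYPE value are simplifications, not the USB CDC definitions):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified descriptor header: every USB descriptor starts with these two. */
    struct desc_hdr {
            unsigned char bLength;
            unsigned char bDescriptorType;
    };

    #define WANTED_TYPE 0x24        /* stand-in for USB_DT_CS_INTERFACE */

    static const struct desc_hdr *find_desc(const unsigned char *buf, size_t buflen)
    {
            while (buflen >= sizeof(struct desc_hdr)) {
                    const struct desc_hdr *d = (const struct desc_hdr *)buf;

                    /* A descriptor may not claim more bytes than remain,
                     * and must be at least a full header (also stops a
                     * zero-length descriptor from looping forever). */
                    if (d->bLength > buflen || d->bLength < sizeof(*d))
                            return NULL;

                    if (d->bDescriptorType == WANTED_TYPE)
                            return d;

                    buf += d->bLength;
                    buflen -= d->bLength;
            }
            return NULL;
    }

    int main(void)
    {
            unsigned char buf[] = { 3, 0x05, 0x00, 4, 0x24, 0x06, 0x00 };

            printf("found: %p\n", (const void *)find_desc(buf, sizeof(buf)));
            return 0;
    }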
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 5af0b7d200bc..ee5466a374bf 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1709,8 +1709,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
1709 .sensor_pdata = { 1709 .sensor_pdata = {
1710 .sensor_type = rmi_sensor_touchpad, 1710 .sensor_type = rmi_sensor_touchpad,
1711 .axis_align.flip_y = true, 1711 .axis_align.flip_y = true,
1712 /* to prevent cursors jumps: */ 1712 .kernel_tracking = false,
1713 .kernel_tracking = true,
1714 .topbuttonpad = topbuttonpad, 1713 .topbuttonpad = topbuttonpad,
1715 }, 1714 },
1716 .f30_data = { 1715 .f30_data = {
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 32d2762448aa..b3bbad7d2282 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -72,6 +72,9 @@ struct goodix_ts_data {
72#define GOODIX_REG_CONFIG_DATA 0x8047 72#define GOODIX_REG_CONFIG_DATA 0x8047
73#define GOODIX_REG_ID 0x8140 73#define GOODIX_REG_ID 0x8140
74 74
75#define GOODIX_BUFFER_STATUS_READY BIT(7)
76#define GOODIX_BUFFER_STATUS_TIMEOUT 20
77
75#define RESOLUTION_LOC 1 78#define RESOLUTION_LOC 1
76#define MAX_CONTACTS_LOC 5 79#define MAX_CONTACTS_LOC 5
77#define TRIGGER_LOC 6 80#define TRIGGER_LOC 6
@@ -195,35 +198,53 @@ static int goodix_get_cfg_len(u16 id)
195 198
196static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) 199static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
197{ 200{
201 unsigned long max_timeout;
198 int touch_num; 202 int touch_num;
199 int error; 203 int error;
200 204
201 error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, data, 205 /*
202 GOODIX_CONTACT_SIZE + 1); 206 * The 'buffer status' bit, which indicates that the data is valid, is
203 if (error) { 207 * not set as soon as the interrupt is raised, but slightly after.
204 dev_err(&ts->client->dev, "I2C transfer error: %d\n", error); 208 * This takes around 10 ms to happen, so we poll for 20 ms.
205 return error; 209 */
206 } 210 max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT);
211 do {
212 error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR,
213 data, GOODIX_CONTACT_SIZE + 1);
214 if (error) {
215 dev_err(&ts->client->dev, "I2C transfer error: %d\n",
216 error);
217 return error;
218 }
207 219
208 if (!(data[0] & 0x80)) 220 if (data[0] & GOODIX_BUFFER_STATUS_READY) {
209 return -EAGAIN; 221 touch_num = data[0] & 0x0f;
222 if (touch_num > ts->max_touch_num)
223 return -EPROTO;
224
225 if (touch_num > 1) {
226 data += 1 + GOODIX_CONTACT_SIZE;
227 error = goodix_i2c_read(ts->client,
228 GOODIX_READ_COOR_ADDR +
229 1 + GOODIX_CONTACT_SIZE,
230 data,
231 GOODIX_CONTACT_SIZE *
232 (touch_num - 1));
233 if (error)
234 return error;
235 }
236
237 return touch_num;
238 }
210 239
211 touch_num = data[0] & 0x0f; 240 usleep_range(1000, 2000); /* Poll every 1 - 2 ms */
212 if (touch_num > ts->max_touch_num) 241 } while (time_before(jiffies, max_timeout));
213 return -EPROTO;
214
215 if (touch_num > 1) {
216 data += 1 + GOODIX_CONTACT_SIZE;
217 error = goodix_i2c_read(ts->client,
218 GOODIX_READ_COOR_ADDR +
219 1 + GOODIX_CONTACT_SIZE,
220 data,
221 GOODIX_CONTACT_SIZE * (touch_num - 1));
222 if (error)
223 return error;
224 }
225 242
226 return touch_num; 243 /*
244 * The Goodix panel will send spurious interrupts after a
245 * 'finger up' event, which will always cause a timeout.
246 */
247 return 0;
227} 248}
228 249
229static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) 250static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
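The goodix rework replaces the old "status bit not set, return -EAGAIN" path with a bounded poll: re-read the status byte every 1-2 ms until the buffer-ready bit appears or about 20 ms have passed, and treat a timeout as a harmless spurious interrupt. A user-space sketch of that poll-until-ready-or-timeout shape follows; read_status() and the millisecond clock are stand-ins for goodix_i2c_read() and jiffies, chosen only to keep the example runnable.

    #define _POSIX_C_SOURCE 199309L
    #include <stdio.h>
    #include <time.h>

    #define STATUS_READY    0x80    /* mirrors GOODIX_BUFFER_STATUS_READY */
    #define TIMEOUT_MS      20      /* mirrors GOODIX_BUFFER_STATUS_TIMEOUT */

    /* Hypothetical bus read standing in for goodix_i2c_read(). */
    static int read_status(unsigned char *status)
    {
            static int calls;
            *status = (++calls > 5) ? (STATUS_READY | 2) : 0; /* ready on 6th poll */
            return 0;
    }

    static long now_ms(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    static int read_touches(void)
    {
            long deadline = now_ms() + TIMEOUT_MS;
            unsigned char status;

            do {
                    if (read_status(&status))
                            return -1;                      /* bus error */
                    if (status & STATUS_READY)
                            return status & 0x0f;           /* touch count */
                    nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
            } while (now_ms() < deadline);

            return 0;       /* spurious interrupt: data never became valid */
    }

    int main(void)
    {
            printf("touches: %d\n", read_touches());
            return 0;
    }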
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index 157fdb4bb2e8..8c6c6178ec12 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -663,12 +663,10 @@ static int stmfts_probe(struct i2c_client *client,
663 sdata->input->open = stmfts_input_open; 663 sdata->input->open = stmfts_input_open;
664 sdata->input->close = stmfts_input_close; 664 sdata->input->close = stmfts_input_close;
665 665
666 input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_X);
667 input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_Y);
666 touchscreen_parse_properties(sdata->input, true, &sdata->prop); 668 touchscreen_parse_properties(sdata->input, true, &sdata->prop);
667 669
668 input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0,
669 sdata->prop.max_x, 0, 0);
670 input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0,
671 sdata->prop.max_y, 0, 0);
672 input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); 670 input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
673 input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0); 671 input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0);
674 input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0); 672 input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 7953381d939a..f1043ae71dcc 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -161,7 +161,7 @@ static void titsc_step_config(struct titsc *ts_dev)
161 break; 161 break;
162 case 5: 162 case 5:
163 config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 | 163 config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 |
164 ts_dev->bit_xn | ts_dev->bit_yp; 164 STEPCONFIG_XNP | STEPCONFIG_YPN;
165 break; 165 break;
166 case 8: 166 case 8:
167 config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp); 167 config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e8d89343d613..e88395605e32 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -107,6 +107,10 @@ struct its_node {
107 107
108#define ITS_ITT_ALIGN SZ_256 108#define ITS_ITT_ALIGN SZ_256
109 109
110/* The maximum number of VPEID bits supported by VLPI commands */
111#define ITS_MAX_VPEID_BITS (16)
112#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
113
110/* Convert page order to size in bytes */ 114/* Convert page order to size in bytes */
111#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) 115#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
112 116
@@ -308,7 +312,7 @@ static void its_encode_size(struct its_cmd_block *cmd, u8 size)
308 312
309static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) 313static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
310{ 314{
311 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8); 315 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
312} 316}
313 317
314static void its_encode_valid(struct its_cmd_block *cmd, int valid) 318static void its_encode_valid(struct its_cmd_block *cmd, int valid)
@@ -318,7 +322,7 @@ static void its_encode_valid(struct its_cmd_block *cmd, int valid)
318 322
319static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) 323static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
320{ 324{
321 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16); 325 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
322} 326}
323 327
324static void its_encode_collection(struct its_cmd_block *cmd, u16 col) 328static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
@@ -358,7 +362,7 @@ static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
358 362
359static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) 363static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
360{ 364{
361 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16); 365 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
362} 366}
363 367
364static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) 368static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
@@ -1478,9 +1482,9 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
1478 u64 val = its_read_baser(its, baser); 1482 u64 val = its_read_baser(its, baser);
1479 u64 esz = GITS_BASER_ENTRY_SIZE(val); 1483 u64 esz = GITS_BASER_ENTRY_SIZE(val);
1480 u64 type = GITS_BASER_TYPE(val); 1484 u64 type = GITS_BASER_TYPE(val);
1485 u64 baser_phys, tmp;
1481 u32 alloc_pages; 1486 u32 alloc_pages;
1482 void *base; 1487 void *base;
1483 u64 tmp;
1484 1488
1485retry_alloc_baser: 1489retry_alloc_baser:
1486 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); 1490 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
@@ -1496,8 +1500,24 @@ retry_alloc_baser:
1496 if (!base) 1500 if (!base)
1497 return -ENOMEM; 1501 return -ENOMEM;
1498 1502
1503 baser_phys = virt_to_phys(base);
1504
1505 /* Check if the physical address of the memory is above 48bits */
1506 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
1507
1508 /* 52bit PA is supported only when PageSize=64K */
1509 if (psz != SZ_64K) {
1510 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
1511 free_pages((unsigned long)base, order);
1512 return -ENXIO;
1513 }
1514
1515 /* Convert 52bit PA to 48bit field */
1516 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
1517 }
1518
1499retry_baser: 1519retry_baser:
1500 val = (virt_to_phys(base) | 1520 val = (baser_phys |
1501 (type << GITS_BASER_TYPE_SHIFT) | 1521 (type << GITS_BASER_TYPE_SHIFT) |
1502 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | 1522 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
1503 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | 1523 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
@@ -1582,13 +1602,12 @@ retry_baser:
1582 1602
1583static bool its_parse_indirect_baser(struct its_node *its, 1603static bool its_parse_indirect_baser(struct its_node *its,
1584 struct its_baser *baser, 1604 struct its_baser *baser,
1585 u32 psz, u32 *order) 1605 u32 psz, u32 *order, u32 ids)
1586{ 1606{
1587 u64 tmp = its_read_baser(its, baser); 1607 u64 tmp = its_read_baser(its, baser);
1588 u64 type = GITS_BASER_TYPE(tmp); 1608 u64 type = GITS_BASER_TYPE(tmp);
1589 u64 esz = GITS_BASER_ENTRY_SIZE(tmp); 1609 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
1590 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; 1610 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
1591 u32 ids = its->device_ids;
1592 u32 new_order = *order; 1611 u32 new_order = *order;
1593 bool indirect = false; 1612 bool indirect = false;
1594 1613
@@ -1680,9 +1699,13 @@ static int its_alloc_tables(struct its_node *its)
1680 continue; 1699 continue;
1681 1700
1682 case GITS_BASER_TYPE_DEVICE: 1701 case GITS_BASER_TYPE_DEVICE:
1702 indirect = its_parse_indirect_baser(its, baser,
1703 psz, &order,
1704 its->device_ids);
1683 case GITS_BASER_TYPE_VCPU: 1705 case GITS_BASER_TYPE_VCPU:
1684 indirect = its_parse_indirect_baser(its, baser, 1706 indirect = its_parse_indirect_baser(its, baser,
1685 psz, &order); 1707 psz, &order,
1708 ITS_MAX_VPEID_BITS);
1686 break; 1709 break;
1687 } 1710 }
1688 1711
@@ -2551,7 +2574,7 @@ static struct irq_chip its_vpe_irq_chip = {
2551 2574
2552static int its_vpe_id_alloc(void) 2575static int its_vpe_id_alloc(void)
2553{ 2576{
2554 return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL); 2577 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
2555} 2578}
2556 2579
2557static void its_vpe_id_free(u16 id) 2580static void its_vpe_id_free(u16 id)
@@ -2851,7 +2874,7 @@ static int its_init_vpe_domain(void)
2851 return -ENOMEM; 2874 return -ENOMEM;
2852 } 2875 }
2853 2876
2854 BUG_ON(entries != vpe_proxy.dev->nr_ites); 2877 BUG_ON(entries > vpe_proxy.dev->nr_ites);
2855 2878
2856 raw_spin_lock_init(&vpe_proxy.lock); 2879 raw_spin_lock_init(&vpe_proxy.lock);
2857 vpe_proxy.next_victim = 0; 2880 vpe_proxy.next_victim = 0;
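The new 52-bit handling in its_setup_baser() only kicks in when the table's physical address has bits above 47 set: that is legal only with 64K pages, and the address is then folded into the 48-bit GITS_BASER field. The sketch below assumes the common GICv3 layout, where bits [47:16] stay in place and bits [51:48] are carried in field bits [15:12]; that is my reading of what GITS_BASER_PHYS_52_to_48() does, not a quote of it, and it only works because a 64K-aligned address has zeros in bits [15:0].

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Fold a 52-bit physical address into a 48-bit wide register field.
     * Assumed layout (GICv3, 64K pages): bits [47:16] keep their place,
     * bits [51:48] of the address go into field bits [15:12].
     */
    static uint64_t phys_52_to_48(uint64_t phys)
    {
            return (phys & 0x0000ffffffff0000ULL) |         /* bits [47:16] */
                   (((phys >> 48) & 0xf) << 12);            /* [51:48] -> [15:12] */
    }

    int main(void)
    {
            uint64_t pa = 0x000f123456780000ULL;    /* a 52-bit, 64K-aligned PA */

            printf("field = 0x%016llx\n",
                   (unsigned long long)phys_52_to_48(pa));
            return 0;
    }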
diff --git a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c
index bdbb5c0ff7fe..0c085303a583 100644
--- a/drivers/irqchip/irq-tango.c
+++ b/drivers/irqchip/irq-tango.c
@@ -141,7 +141,7 @@ static void __init tangox_irq_init_chip(struct irq_chip_generic *gc,
141 for (i = 0; i < 2; i++) { 141 for (i = 0; i < 2; i++) {
142 ct[i].chip.irq_ack = irq_gc_ack_set_bit; 142 ct[i].chip.irq_ack = irq_gc_ack_set_bit;
143 ct[i].chip.irq_mask = irq_gc_mask_disable_reg; 143 ct[i].chip.irq_mask = irq_gc_mask_disable_reg;
144 ct[i].chip.irq_mask_ack = irq_gc_mask_disable_reg_and_ack; 144 ct[i].chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
145 ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg; 145 ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg;
146 ct[i].chip.irq_set_type = tangox_irq_set_type; 146 ct[i].chip.irq_set_type = tangox_irq_set_type;
147 ct[i].chip.name = gc->domain->name; 147 ct[i].chip.name = gc->domain->name;
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index eed6c397d840..f8a808d45034 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -1797,12 +1797,19 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
1797 */ 1797 */
1798 switch (msg->msg[1]) { 1798 switch (msg->msg[1]) {
1799 case CEC_MSG_GET_CEC_VERSION: 1799 case CEC_MSG_GET_CEC_VERSION:
1800 case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
1801 case CEC_MSG_ABORT: 1800 case CEC_MSG_ABORT:
1802 case CEC_MSG_GIVE_DEVICE_POWER_STATUS: 1801 case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
1803 case CEC_MSG_GIVE_PHYSICAL_ADDR:
1804 case CEC_MSG_GIVE_OSD_NAME: 1802 case CEC_MSG_GIVE_OSD_NAME:
1803 /*
1804 * These messages reply with a directed message, so ignore if
1805 * the initiator is Unregistered.
1806 */
1807 if (!adap->passthrough && from_unregistered)
1808 return 0;
1809 /* Fall through */
1810 case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
1805 case CEC_MSG_GIVE_FEATURES: 1811 case CEC_MSG_GIVE_FEATURES:
1812 case CEC_MSG_GIVE_PHYSICAL_ADDR:
1806 /* 1813 /*
1807 * Skip processing these messages if the passthrough mode 1814 * Skip processing these messages if the passthrough mode
1808 * is on. 1815 * is on.
@@ -1810,7 +1817,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
1810 if (adap->passthrough) 1817 if (adap->passthrough)
1811 goto skip_processing; 1818 goto skip_processing;
1812 /* Ignore if addressing is wrong */ 1819 /* Ignore if addressing is wrong */
1813 if (is_broadcast || from_unregistered) 1820 if (is_broadcast)
1814 return 0; 1821 return 0;
1815 break; 1822 break;
1816 1823
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 2fcba1616168..9139d01ba7ed 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -141,22 +141,39 @@ struct dvb_frontend_private {
141static void dvb_frontend_invoke_release(struct dvb_frontend *fe, 141static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
142 void (*release)(struct dvb_frontend *fe)); 142 void (*release)(struct dvb_frontend *fe));
143 143
144static void dvb_frontend_free(struct kref *ref) 144static void __dvb_frontend_free(struct dvb_frontend *fe)
145{ 145{
146 struct dvb_frontend *fe =
147 container_of(ref, struct dvb_frontend, refcount);
148 struct dvb_frontend_private *fepriv = fe->frontend_priv; 146 struct dvb_frontend_private *fepriv = fe->frontend_priv;
149 147
148 if (!fepriv)
149 return;
150
150 dvb_free_device(fepriv->dvbdev); 151 dvb_free_device(fepriv->dvbdev);
151 152
152 dvb_frontend_invoke_release(fe, fe->ops.release); 153 dvb_frontend_invoke_release(fe, fe->ops.release);
153 154
154 kfree(fepriv); 155 kfree(fepriv);
156 fe->frontend_priv = NULL;
157}
158
159static void dvb_frontend_free(struct kref *ref)
160{
161 struct dvb_frontend *fe =
162 container_of(ref, struct dvb_frontend, refcount);
163
164 __dvb_frontend_free(fe);
155} 165}
156 166
157static void dvb_frontend_put(struct dvb_frontend *fe) 167static void dvb_frontend_put(struct dvb_frontend *fe)
158{ 168{
159 kref_put(&fe->refcount, dvb_frontend_free); 169 /*
170 * Check if the frontend was registered, as otherwise
171 * kref was not initialized yet.
172 */
173 if (fe->frontend_priv)
174 kref_put(&fe->refcount, dvb_frontend_free);
175 else
176 __dvb_frontend_free(fe);
160} 177}
161 178
162static void dvb_frontend_get(struct dvb_frontend *fe) 179static void dvb_frontend_get(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index 224283fe100a..4d086a7248e9 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -55,29 +55,57 @@ struct dib3000mc_state {
55 55
56static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg) 56static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg)
57{ 57{
58 u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff };
59 u8 rb[2];
60 struct i2c_msg msg[2] = { 58 struct i2c_msg msg[2] = {
61 { .addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2 }, 59 { .addr = state->i2c_addr >> 1, .flags = 0, .len = 2 },
62 { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 }, 60 { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .len = 2 },
63 }; 61 };
62 u16 word;
63 u8 *b;
64
65 b = kmalloc(4, GFP_KERNEL);
66 if (!b)
67 return 0;
68
69 b[0] = (reg >> 8) | 0x80;
70 b[1] = reg;
71 b[2] = 0;
72 b[3] = 0;
73
74 msg[0].buf = b;
75 msg[1].buf = b + 2;
64 76
65 if (i2c_transfer(state->i2c_adap, msg, 2) != 2) 77 if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
66 dprintk("i2c read error on %d\n",reg); 78 dprintk("i2c read error on %d\n",reg);
67 79
68 return (rb[0] << 8) | rb[1]; 80 word = (b[2] << 8) | b[3];
81 kfree(b);
82
83 return word;
69} 84}
70 85
71static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val) 86static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val)
72{ 87{
73 u8 b[4] = {
74 (reg >> 8) & 0xff, reg & 0xff,
75 (val >> 8) & 0xff, val & 0xff,
76 };
77 struct i2c_msg msg = { 88 struct i2c_msg msg = {
78 .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4 89 .addr = state->i2c_addr >> 1, .flags = 0, .len = 4
79 }; 90 };
80 return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0; 91 int rc;
92 u8 *b;
93
94 b = kmalloc(4, GFP_KERNEL);
95 if (!b)
96 return -ENOMEM;
97
98 b[0] = reg >> 8;
99 b[1] = reg;
100 b[2] = val >> 8;
101 b[3] = val;
102
103 msg.buf = b;
104
105 rc = i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
106 kfree(b);
107
108 return rc;
81} 109}
82 110
83static int dib3000mc_identify(struct dib3000mc_state *state) 111static int dib3000mc_identify(struct dib3000mc_state *state)
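This hunk, like the dvb-pll and mt2060 changes further down, moves I2C message buffers off the stack and onto the heap, because some I2C host controllers DMA straight out of msg.buf and stack memory is not guaranteed to be DMA-safe. A compact user-space sketch of the resulting read-helper shape; malloc() and bus_xfer() stand in for kmalloc() and i2c_transfer() and are not the driver's API.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* Hypothetical transfer primitive standing in for i2c_transfer(). */
    static int bus_xfer(uint8_t *wbuf, int wlen, uint8_t *rbuf, int rlen)
    {
            (void)wbuf; (void)wlen; (void)rlen;
            rbuf[0] = 0x12; rbuf[1] = 0x34;  /* pretend the device answered */
            return 0;
    }

    /* Read a 16-bit register through a heap buffer (DMA-safe in the kernel). */
    static int read_word(uint16_t reg, uint16_t *val)
    {
            uint8_t *b = malloc(4);
            int rc;

            if (!b)
                    return -1;

            b[0] = (reg >> 8) | 0x80;       /* read command, high address byte */
            b[1] = reg & 0xff;

            rc = bus_xfer(b, 2, b + 2, 2);
            if (!rc)
                    *val = (b[2] << 8) | b[3];

            free(b);
            return rc;
    }

    int main(void)
    {
            uint16_t v = 0;

            printf("rc=%d val=0x%04x\n", read_word(0x7ff, &v), v);
            return 0;
    }

The write helpers in the diff follow the same shape: allocate, fill, transfer, free, and return the transfer's status instead of bailing out early with the buffer still live on the stack.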
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 7bec3e028bee..5553b89b804e 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -753,13 +753,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
753 struct i2c_adapter *i2c, 753 struct i2c_adapter *i2c,
754 unsigned int pll_desc_id) 754 unsigned int pll_desc_id)
755{ 755{
756 u8 b1 [] = { 0 }; 756 u8 *b1;
757 struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, 757 struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .len = 1 };
758 .buf = b1, .len = 1 };
759 struct dvb_pll_priv *priv = NULL; 758 struct dvb_pll_priv *priv = NULL;
760 int ret; 759 int ret;
761 const struct dvb_pll_desc *desc; 760 const struct dvb_pll_desc *desc;
762 761
762 b1 = kmalloc(1, GFP_KERNEL);
763 if (!b1)
764 return NULL;
765
766 b1[0] = 0;
767 msg.buf = b1;
768
763 if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) && 769 if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
764 (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list))) 770 (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
765 pll_desc_id = id[dvb_pll_devcount]; 771 pll_desc_id = id[dvb_pll_devcount];
@@ -773,15 +779,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
773 fe->ops.i2c_gate_ctrl(fe, 1); 779 fe->ops.i2c_gate_ctrl(fe, 1);
774 780
775 ret = i2c_transfer (i2c, &msg, 1); 781 ret = i2c_transfer (i2c, &msg, 1);
776 if (ret != 1) 782 if (ret != 1) {
783 kfree(b1);
777 return NULL; 784 return NULL;
785 }
778 if (fe->ops.i2c_gate_ctrl) 786 if (fe->ops.i2c_gate_ctrl)
779 fe->ops.i2c_gate_ctrl(fe, 0); 787 fe->ops.i2c_gate_ctrl(fe, 0);
780 } 788 }
781 789
782 priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL); 790 priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
783 if (priv == NULL) 791 if (!priv) {
792 kfree(b1);
784 return NULL; 793 return NULL;
794 }
785 795
786 priv->pll_i2c_address = pll_addr; 796 priv->pll_i2c_address = pll_addr;
787 priv->i2c = i2c; 797 priv->i2c = i2c;
@@ -811,6 +821,8 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
811 "insmod option" : "autodetected"); 821 "insmod option" : "autodetected");
812 } 822 }
813 823
824 kfree(b1);
825
814 return fe; 826 return fe;
815} 827}
816EXPORT_SYMBOL(dvb_pll_attach); 828EXPORT_SYMBOL(dvb_pll_attach);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 7e7cc49b8674..3c4f7fa7b9d8 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -112,7 +112,7 @@ config VIDEO_PXA27x
112 112
113config VIDEO_QCOM_CAMSS 113config VIDEO_QCOM_CAMSS
114 tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver" 114 tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver"
115 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API 115 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
116 depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST 116 depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
117 select VIDEOBUF2_DMA_SG 117 select VIDEOBUF2_DMA_SG
118 select V4L2_FWNODE 118 select V4L2_FWNODE
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
index b21b3c2dc77f..b22d2dfcd3c2 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
@@ -2660,7 +2660,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
2660 * 2660 *
2661 * Return -EINVAL or zero on success 2661 * Return -EINVAL or zero on success
2662 */ 2662 */
2663int vfe_set_selection(struct v4l2_subdev *sd, 2663static int vfe_set_selection(struct v4l2_subdev *sd,
2664 struct v4l2_subdev_pad_config *cfg, 2664 struct v4l2_subdev_pad_config *cfg,
2665 struct v4l2_subdev_selection *sel) 2665 struct v4l2_subdev_selection *sel)
2666{ 2666{
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 68933d208063..9b2a401a4891 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -682,6 +682,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
682 hfi_session_abort(inst); 682 hfi_session_abort(inst);
683 683
684 load_scale_clocks(core); 684 load_scale_clocks(core);
685 INIT_LIST_HEAD(&inst->registeredbufs);
685 } 686 }
686 687
687 venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR); 688 venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
diff --git a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
index 1edf667d562a..146ae6f25cdb 100644
--- a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
+++ b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
@@ -172,7 +172,8 @@ u32 s5p_cec_get_status(struct s5p_cec_dev *cec)
172{ 172{
173 u32 status = 0; 173 u32 status = 0;
174 174
175 status = readb(cec->reg + S5P_CEC_STATUS_0); 175 status = readb(cec->reg + S5P_CEC_STATUS_0) & 0xf;
176 status |= (readb(cec->reg + S5P_CEC_TX_STAT1) & 0xf) << 4;
176 status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8; 177 status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8;
177 status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16; 178 status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16;
178 status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24; 179 status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24;
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
index 58d200e7c838..8837e2678bde 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.c
+++ b/drivers/media/platform/s5p-cec/s5p_cec.c
@@ -92,7 +92,10 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
92 dev_dbg(cec->dev, "irq received\n"); 92 dev_dbg(cec->dev, "irq received\n");
93 93
94 if (status & CEC_STATUS_TX_DONE) { 94 if (status & CEC_STATUS_TX_DONE) {
95 if (status & CEC_STATUS_TX_ERROR) { 95 if (status & CEC_STATUS_TX_NACK) {
96 dev_dbg(cec->dev, "CEC_STATUS_TX_NACK set\n");
97 cec->tx = STATE_NACK;
98 } else if (status & CEC_STATUS_TX_ERROR) {
96 dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n"); 99 dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n");
97 cec->tx = STATE_ERROR; 100 cec->tx = STATE_ERROR;
98 } else { 101 } else {
@@ -135,6 +138,12 @@ static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv)
135 cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0); 138 cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
136 cec->tx = STATE_IDLE; 139 cec->tx = STATE_IDLE;
137 break; 140 break;
141 case STATE_NACK:
142 cec_transmit_done(cec->adap,
143 CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_NACK,
144 0, 1, 0, 0);
145 cec->tx = STATE_IDLE;
146 break;
138 case STATE_ERROR: 147 case STATE_ERROR:
139 cec_transmit_done(cec->adap, 148 cec_transmit_done(cec->adap,
140 CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR, 149 CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR,
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.h b/drivers/media/platform/s5p-cec/s5p_cec.h
index 8bcd8dc1aeb9..86ded522ef27 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.h
+++ b/drivers/media/platform/s5p-cec/s5p_cec.h
@@ -35,6 +35,7 @@
35#define CEC_STATUS_TX_TRANSFERRING (1 << 1) 35#define CEC_STATUS_TX_TRANSFERRING (1 << 1)
36#define CEC_STATUS_TX_DONE (1 << 2) 36#define CEC_STATUS_TX_DONE (1 << 2)
37#define CEC_STATUS_TX_ERROR (1 << 3) 37#define CEC_STATUS_TX_ERROR (1 << 3)
38#define CEC_STATUS_TX_NACK (1 << 4)
38#define CEC_STATUS_TX_BYTES (0xFF << 8) 39#define CEC_STATUS_TX_BYTES (0xFF << 8)
39#define CEC_STATUS_RX_RUNNING (1 << 16) 40#define CEC_STATUS_RX_RUNNING (1 << 16)
40#define CEC_STATUS_RX_RECEIVING (1 << 17) 41#define CEC_STATUS_RX_RECEIVING (1 << 17)
@@ -55,6 +56,7 @@ enum cec_state {
55 STATE_IDLE, 56 STATE_IDLE,
56 STATE_BUSY, 57 STATE_BUSY,
57 STATE_DONE, 58 STATE_DONE,
59 STATE_NACK,
58 STATE_ERROR 60 STATE_ERROR
59}; 61};
60 62
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 2e487f9a2cc3..4983eeb39f36 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -38,41 +38,74 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
38static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val) 38static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val)
39{ 39{
40 struct i2c_msg msg[2] = { 40 struct i2c_msg msg[2] = {
41 { .addr = priv->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 }, 41 { .addr = priv->cfg->i2c_address, .flags = 0, .len = 1 },
42 { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val, .len = 1 }, 42 { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .len = 1 },
43 }; 43 };
44 int rc = 0;
45 u8 *b;
46
47 b = kmalloc(2, GFP_KERNEL);
48 if (!b)
49 return -ENOMEM;
50
51 b[0] = reg;
52 b[1] = 0;
53
54 msg[0].buf = b;
55 msg[1].buf = b + 1;
44 56
45 if (i2c_transfer(priv->i2c, msg, 2) != 2) { 57 if (i2c_transfer(priv->i2c, msg, 2) != 2) {
46 printk(KERN_WARNING "mt2060 I2C read failed\n"); 58 printk(KERN_WARNING "mt2060 I2C read failed\n");
47 return -EREMOTEIO; 59 rc = -EREMOTEIO;
48 } 60 }
49 return 0; 61 *val = b[1];
62 kfree(b);
63
64 return rc;
50} 65}
51 66
52// Writes a single register 67// Writes a single register
53static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val) 68static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val)
54{ 69{
55 u8 buf[2] = { reg, val };
56 struct i2c_msg msg = { 70 struct i2c_msg msg = {
57 .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2 71 .addr = priv->cfg->i2c_address, .flags = 0, .len = 2
58 }; 72 };
73 u8 *buf;
74 int rc = 0;
75
76 buf = kmalloc(2, GFP_KERNEL);
77 if (!buf)
78 return -ENOMEM;
79
80 buf[0] = reg;
81 buf[1] = val;
82
83 msg.buf = buf;
59 84
60 if (i2c_transfer(priv->i2c, &msg, 1) != 1) { 85 if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
61 printk(KERN_WARNING "mt2060 I2C write failed\n"); 86 printk(KERN_WARNING "mt2060 I2C write failed\n");
62 return -EREMOTEIO; 87 rc = -EREMOTEIO;
63 } 88 }
64 return 0; 89 kfree(buf);
90 return rc;
65} 91}
66 92
67// Writes a set of consecutive registers 93// Writes a set of consecutive registers
68static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len) 94static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
69{ 95{
70 int rem, val_len; 96 int rem, val_len;
71 u8 xfer_buf[16]; 97 u8 *xfer_buf;
98 int rc = 0;
72 struct i2c_msg msg = { 99 struct i2c_msg msg = {
73 .addr = priv->cfg->i2c_address, .flags = 0, .buf = xfer_buf 100 .addr = priv->cfg->i2c_address, .flags = 0
74 }; 101 };
75 102
103 xfer_buf = kmalloc(16, GFP_KERNEL);
104 if (!xfer_buf)
105 return -ENOMEM;
106
107 msg.buf = xfer_buf;
108
76 for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) { 109 for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) {
77 val_len = min_t(int, rem, priv->i2c_max_regs); 110 val_len = min_t(int, rem, priv->i2c_max_regs);
78 msg.len = 1 + val_len; 111 msg.len = 1 + val_len;
@@ -81,11 +114,13 @@ static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
81 114
82 if (i2c_transfer(priv->i2c, &msg, 1) != 1) { 115 if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
83 printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len); 116 printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len);
84 return -EREMOTEIO; 117 rc = -EREMOTEIO;
118 break;
85 } 119 }
86 } 120 }
87 121
88 return 0; 122 kfree(xfer_buf);
123 return rc;
89} 124}
90 125
91// Initialisation sequences 126// Initialisation sequences
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index d0ccc6729fd2..67d787fa3306 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -448,6 +448,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
448 int err; 448 int err;
449 u32 val; 449 u32 val;
450 450
451 intel_host->d3_retune = true;
452
451 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); 453 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
452 if (err) { 454 if (err) {
453 pr_debug("%s: DSM not supported, error %d\n", 455 pr_debug("%s: DSM not supported, error %d\n",
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 13f0f219d8aa..a13a4896a8bd 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -182,22 +182,23 @@
182/* FLEXCAN hardware feature flags 182/* FLEXCAN hardware feature flags
183 * 183 *
184 * Below is some version info we got: 184 * Below is some version info we got:
185 * SOC Version IP-Version Glitch- [TR]WRN_INT Memory err RTR re- 185 * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
186 * Filter? connected? detection ception in MB 186 * Filter? connected? Passive detection ception in MB
187 * MX25 FlexCAN2 03.00.00.00 no no no no 187 * MX25 FlexCAN2 03.00.00.00 no no ? no no
188 * MX28 FlexCAN2 03.00.04.00 yes yes no no 188 * MX28 FlexCAN2 03.00.04.00 yes yes no no no
189 * MX35 FlexCAN2 03.00.00.00 no no no no 189 * MX35 FlexCAN2 03.00.00.00 no no ? no no
190 * MX53 FlexCAN2 03.00.00.00 yes no no no 190 * MX53 FlexCAN2 03.00.00.00 yes no no no no
191 * MX6s FlexCAN3 10.00.12.00 yes yes no yes 191 * MX6s FlexCAN3 10.00.12.00 yes yes no no yes
192 * VF610 FlexCAN3 ? no yes yes yes? 192 * VF610 FlexCAN3 ? no yes ? yes yes?
193 * 193 *
194 * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. 194 * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
195 */ 195 */
196#define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */ 196#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) /* [TR]WRN_INT not connected */
197#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ 197#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */
198#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ 198#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
199#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ 199#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */
200#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ 200#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
201#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */
201 202
202/* Structure of the message buffer */ 203/* Structure of the message buffer */
203struct flexcan_mb { 204struct flexcan_mb {
@@ -281,14 +282,17 @@ struct flexcan_priv {
281}; 282};
282 283
283static const struct flexcan_devtype_data fsl_p1010_devtype_data = { 284static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
284 .quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE, 285 .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
286 FLEXCAN_QUIRK_BROKEN_PERR_STATE,
285}; 287};
286 288
287static const struct flexcan_devtype_data fsl_imx28_devtype_data; 289static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
290 .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
291};
288 292
289static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { 293static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
290 .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | 294 .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
291 FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, 295 FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE,
292}; 296};
293 297
294static const struct flexcan_devtype_data fsl_vf610_devtype_data = { 298static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
@@ -335,6 +339,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
335} 339}
336#endif 340#endif
337 341
342static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
343{
344 struct flexcan_regs __iomem *regs = priv->regs;
345 u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
346
347 flexcan_write(reg_ctrl, &regs->ctrl);
348}
349
350static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
351{
352 struct flexcan_regs __iomem *regs = priv->regs;
353 u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
354
355 flexcan_write(reg_ctrl, &regs->ctrl);
356}
357
338static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) 358static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
339{ 359{
340 if (!priv->reg_xceiver) 360 if (!priv->reg_xceiver)
@@ -713,6 +733,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
713 struct flexcan_regs __iomem *regs = priv->regs; 733 struct flexcan_regs __iomem *regs = priv->regs;
714 irqreturn_t handled = IRQ_NONE; 734 irqreturn_t handled = IRQ_NONE;
715 u32 reg_iflag1, reg_esr; 735 u32 reg_iflag1, reg_esr;
736 enum can_state last_state = priv->can.state;
716 737
717 reg_iflag1 = flexcan_read(&regs->iflag1); 738 reg_iflag1 = flexcan_read(&regs->iflag1);
718 739
@@ -765,8 +786,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
765 flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr); 786 flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
766 } 787 }
767 788
768 /* state change interrupt */ 789 /* state change interrupt or broken error state quirk fix is enabled */
769 if (reg_esr & FLEXCAN_ESR_ERR_STATE) 790 if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
791 (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
792 FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
770 flexcan_irq_state(dev, reg_esr); 793 flexcan_irq_state(dev, reg_esr);
771 794
772 /* bus error IRQ - handle if bus error reporting is activated */ 795 /* bus error IRQ - handle if bus error reporting is activated */
@@ -774,6 +797,44 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
774 (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) 797 (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
775 flexcan_irq_bus_err(dev, reg_esr); 798 flexcan_irq_bus_err(dev, reg_esr);
776 799
800 /* availability of error interrupt among state transitions in case
801 * bus error reporting is de-activated and
802 * FLEXCAN_QUIRK_BROKEN_PERR_STATE is enabled:
803 * +--------------------------------------------------------------+
804 * | +----------------------------------------------+ [stopped / |
805 * | | | sleeping] -+
806 * +-+-> active <-> warning <-> passive -> bus off -+
807 * ___________^^^^^^^^^^^^_______________________________
808 * disabled(1) enabled disabled
809 *
810 * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled
811 */
812 if ((last_state != priv->can.state) &&
813 (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
814 !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
815 switch (priv->can.state) {
816 case CAN_STATE_ERROR_ACTIVE:
817 if (priv->devtype_data->quirks &
818 FLEXCAN_QUIRK_BROKEN_WERR_STATE)
819 flexcan_error_irq_enable(priv);
820 else
821 flexcan_error_irq_disable(priv);
822 break;
823
824 case CAN_STATE_ERROR_WARNING:
825 flexcan_error_irq_enable(priv);
826 break;
827
828 case CAN_STATE_ERROR_PASSIVE:
829 case CAN_STATE_BUS_OFF:
830 flexcan_error_irq_disable(priv);
831 break;
832
833 default:
834 break;
835 }
836 }
837
777 return handled; 838 return handled;
778} 839}
779 840
@@ -887,7 +948,7 @@ static int flexcan_chip_start(struct net_device *dev)
887 * on most Flexcan cores, too. Otherwise we don't get 948 * on most Flexcan cores, too. Otherwise we don't get
888 * any error warning or passive interrupts. 949 * any error warning or passive interrupts.
889 */ 950 */
890 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_ERR_STATE || 951 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
891 priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) 952 priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
892 reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; 953 reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
893 else 954 else
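For cores carrying FLEXCAN_QUIRK_BROKEN_PERR_STATE the interrupt handler now follows the state diagram in the comment: keep the error interrupt unmasked while it is still needed to notice the next transition, and mask it again once the controller is error-passive or bus-off so it cannot flood the host. A toy sketch of that decision on the new state; the enable/disable helper is a stand-in for flexcan_error_irq_enable()/_disable(), not the driver code itself.

    #include <stdio.h>
    #include <stdbool.h>

    enum can_state { ACTIVE, WARNING, PASSIVE, BUS_OFF };

    /* Stand-in for flexcan_error_irq_enable()/_disable(). */
    static void err_irq_set(bool on)
    {
            printf("error irq %s\n", on ? "enabled" : "disabled");
    }

    /* Pick the error-interrupt mask after a state change (PERR quirk case). */
    static void on_state_change(enum can_state new_state, bool broken_werr)
    {
            switch (new_state) {
            case ACTIVE:
                    /* Warning interrupt only needed if the core lacks it. */
                    err_irq_set(broken_werr);
                    break;
            case WARNING:
                    /* The transition to passive has no interrupt of its own. */
                    err_irq_set(true);
                    break;
            case PASSIVE:
            case BUS_OFF:
                    /* Remaining transitions are reported; stop the error flood. */
                    err_irq_set(false);
                    break;
            }
    }

    int main(void)
    {
            on_state_change(WARNING, false);
            on_state_change(PASSIVE, false);
            return 0;
    }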
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index be928ce62d32..9fdb0f0bfa06 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
333 } 333 }
334 334
335 cf->can_id = id & ESD_IDMASK; 335 cf->can_id = id & ESD_IDMASK;
336 cf->can_dlc = get_can_dlc(msg->msg.rx.dlc); 336 cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
337 337
338 if (id & ESD_EXTID) 338 if (id & ESD_EXTID)
339 cf->can_id |= CAN_EFF_FLAG; 339 cf->can_id |= CAN_EFF_FLAG;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index afcc1312dbaf..68ac3e88a8ce 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -375,6 +375,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
375 375
376 gs_free_tx_context(txc); 376 gs_free_tx_context(txc);
377 377
378 atomic_dec(&dev->active_tx_urbs);
379
378 netif_wake_queue(netdev); 380 netif_wake_queue(netdev);
379 } 381 }
380 382
@@ -463,14 +465,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
463 urb->transfer_buffer_length, 465 urb->transfer_buffer_length,
464 urb->transfer_buffer, 466 urb->transfer_buffer,
465 urb->transfer_dma); 467 urb->transfer_dma);
466
467 atomic_dec(&dev->active_tx_urbs);
468
469 if (!netif_device_present(netdev))
470 return;
471
472 if (netif_queue_stopped(netdev))
473 netif_wake_queue(netdev);
474} 468}
475 469
476static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, 470static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index dce7fa57eb55..f123ed57630d 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -214,8 +214,14 @@ static int mv88e6060_setup(struct dsa_switch *ds)
214 214
215static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr) 215static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
216{ 216{
217 /* Use the same MAC Address as FD Pause frames for all ports */ 217 u16 val = addr[0] << 8 | addr[1];
218 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]); 218
219 /* The multicast bit is always transmitted as a zero, so the switch uses
220 * bit 8 for "DiffAddr", where 0 means all ports transmit the same SA.
221 */
222 val &= 0xfeff;
223
224 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, val);
219 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]); 225 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
220 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]); 226 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
221 227
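The mv88e6060 fix packs the station MAC into three 16-bit registers and explicitly clears bit 8 of the first word: that bit lines up with the multicast bit of addr[0], which the switch always transmits as zero and instead repurposes as the "DiffAddr" flag (0 = all ports send the same source address). The old `addr[0] << 9` shifted the whole first octet one position too far. A small stand-alone sketch of the corrected packing; reg_write() and the register indices are stand-ins for the REG_WRITE macro and the GLOBAL_MAC_* offsets.

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for REG_WRITE(REG_GLOBAL, GLOBAL_MAC_xx, ...). */
    static void reg_write(int reg, uint16_t val)
    {
            printf("reg %d <= 0x%04x\n", reg, val);
    }

    static void set_switch_mac(const uint8_t addr[6])
    {
            uint16_t val = (addr[0] << 8) | addr[1];

            /*
             * The multicast bit of addr[0] is always transmitted as zero,
             * so the switch reuses that register bit as "DiffAddr"; keep it
             * cleared so every port transmits the same source address.
             */
            val &= 0xfeff;

            reg_write(1, val);
            reg_write(2, (addr[2] << 8) | addr[3]);
            reg_write(3, (addr[4] << 8) | addr[5]);
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };

            set_switch_mac(mac);
            return 0;
    }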
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index b1212debc2e1..967020fb26ee 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -742,8 +742,8 @@ static void ena_get_channels(struct net_device *netdev,
742{ 742{
743 struct ena_adapter *adapter = netdev_priv(netdev); 743 struct ena_adapter *adapter = netdev_priv(netdev);
744 744
745 channels->max_rx = ENA_MAX_NUM_IO_QUEUES; 745 channels->max_rx = adapter->num_queues;
746 channels->max_tx = ENA_MAX_NUM_IO_QUEUES; 746 channels->max_tx = adapter->num_queues;
747 channels->max_other = 0; 747 channels->max_other = 0;
748 channels->max_combined = 0; 748 channels->max_combined = 0;
749 channels->rx_count = adapter->num_queues; 749 channels->rx_count = adapter->num_queues;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index f7dc22f65d9f..c6bd5e24005d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -966,7 +966,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
966 u64_stats_update_begin(&rx_ring->syncp); 966 u64_stats_update_begin(&rx_ring->syncp);
967 rx_ring->rx_stats.bad_csum++; 967 rx_ring->rx_stats.bad_csum++;
968 u64_stats_update_end(&rx_ring->syncp); 968 u64_stats_update_end(&rx_ring->syncp);
969 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, 969 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
970 "RX IPv4 header checksum error\n"); 970 "RX IPv4 header checksum error\n");
971 return; 971 return;
972 } 972 }
@@ -979,7 +979,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
979 u64_stats_update_begin(&rx_ring->syncp); 979 u64_stats_update_begin(&rx_ring->syncp);
980 rx_ring->rx_stats.bad_csum++; 980 rx_ring->rx_stats.bad_csum++;
981 u64_stats_update_end(&rx_ring->syncp); 981 u64_stats_update_end(&rx_ring->syncp);
982 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, 982 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
983 "RX L4 checksum error\n"); 983 "RX L4 checksum error\n");
984 skb->ip_summed = CHECKSUM_NONE; 984 skb->ip_summed = CHECKSUM_NONE;
985 return; 985 return;
@@ -3064,7 +3064,8 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3064 if (ena_dev->mem_bar) 3064 if (ena_dev->mem_bar)
3065 devm_iounmap(&pdev->dev, ena_dev->mem_bar); 3065 devm_iounmap(&pdev->dev, ena_dev->mem_bar);
3066 3066
3067 devm_iounmap(&pdev->dev, ena_dev->reg_bar); 3067 if (ena_dev->reg_bar)
3068 devm_iounmap(&pdev->dev, ena_dev->reg_bar);
3068 3069
3069 release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; 3070 release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3070 pci_release_selected_regions(pdev, release_bars); 3071 pci_release_selected_regions(pdev, release_bars);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 0fdaaa643073..57e796870595 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -22,8 +22,12 @@
22 22
23#define AQ_CFG_FORCE_LEGACY_INT 0U 23#define AQ_CFG_FORCE_LEGACY_INT 0U
24 24
25#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U 25#define AQ_CFG_INTERRUPT_MODERATION_OFF 0
26#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU 26#define AQ_CFG_INTERRUPT_MODERATION_ON 1
27#define AQ_CFG_INTERRUPT_MODERATION_AUTO 0xFFFFU
28
29#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2)
30
27#define AQ_CFG_IRQ_MASK 0x1FFU 31#define AQ_CFG_IRQ_MASK 0x1FFU
28 32
29#define AQ_CFG_VECS_MAX 8U 33#define AQ_CFG_VECS_MAX 8U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a761e91471df..d5e99b468870 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -56,10 +56,6 @@ aq_ethtool_set_link_ksettings(struct net_device *ndev,
56 return aq_nic_set_link_ksettings(aq_nic, cmd); 56 return aq_nic_set_link_ksettings(aq_nic, cmd);
57} 57}
58 58
59/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */
60static const unsigned int aq_ethtool_stat_queue_lines = 5U;
61static const unsigned int aq_ethtool_stat_queue_chars =
62 5U * ETH_GSTRING_LEN;
63static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { 59static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
64 "InPackets", 60 "InPackets",
65 "InUCast", 61 "InUCast",
@@ -83,56 +79,26 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
83 "InOctetsDma", 79 "InOctetsDma",
84 "OutOctetsDma", 80 "OutOctetsDma",
85 "InDroppedDma", 81 "InDroppedDma",
86 "Queue[0] InPackets", 82};
87 "Queue[0] OutPackets", 83
88 "Queue[0] InJumboPackets", 84static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
89 "Queue[0] InLroPackets", 85 "Queue[%d] InPackets",
90 "Queue[0] InErrors", 86 "Queue[%d] OutPackets",
91 "Queue[1] InPackets", 87 "Queue[%d] Restarts",
92 "Queue[1] OutPackets", 88 "Queue[%d] InJumboPackets",
93 "Queue[1] InJumboPackets", 89 "Queue[%d] InLroPackets",
94 "Queue[1] InLroPackets", 90 "Queue[%d] InErrors",
95 "Queue[1] InErrors",
96 "Queue[2] InPackets",
97 "Queue[2] OutPackets",
98 "Queue[2] InJumboPackets",
99 "Queue[2] InLroPackets",
100 "Queue[2] InErrors",
101 "Queue[3] InPackets",
102 "Queue[3] OutPackets",
103 "Queue[3] InJumboPackets",
104 "Queue[3] InLroPackets",
105 "Queue[3] InErrors",
106 "Queue[4] InPackets",
107 "Queue[4] OutPackets",
108 "Queue[4] InJumboPackets",
109 "Queue[4] InLroPackets",
110 "Queue[4] InErrors",
111 "Queue[5] InPackets",
112 "Queue[5] OutPackets",
113 "Queue[5] InJumboPackets",
114 "Queue[5] InLroPackets",
115 "Queue[5] InErrors",
116 "Queue[6] InPackets",
117 "Queue[6] OutPackets",
118 "Queue[6] InJumboPackets",
119 "Queue[6] InLroPackets",
120 "Queue[6] InErrors",
121 "Queue[7] InPackets",
122 "Queue[7] OutPackets",
123 "Queue[7] InJumboPackets",
124 "Queue[7] InLroPackets",
125 "Queue[7] InErrors",
126}; 91};
127 92
128static void aq_ethtool_stats(struct net_device *ndev, 93static void aq_ethtool_stats(struct net_device *ndev,
129 struct ethtool_stats *stats, u64 *data) 94 struct ethtool_stats *stats, u64 *data)
130{ 95{
131 struct aq_nic_s *aq_nic = netdev_priv(ndev); 96 struct aq_nic_s *aq_nic = netdev_priv(ndev);
97 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
132 98
133/* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */ 99 memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
134 BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8); 100 ARRAY_SIZE(aq_ethtool_queue_stat_names) *
135 memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64)); 101 cfg->vecs) * sizeof(u64));
136 aq_nic_get_stats(aq_nic, data); 102 aq_nic_get_stats(aq_nic, data);
137} 103}
138 104
@@ -154,8 +120,8 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
154 120
155 strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "", 121 strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
156 sizeof(drvinfo->bus_info)); 122 sizeof(drvinfo->bus_info));
157 drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) - 123 drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
158 (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines; 124 cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
159 drvinfo->testinfo_len = 0; 125 drvinfo->testinfo_len = 0;
160 drvinfo->regdump_len = regs_count; 126 drvinfo->regdump_len = regs_count;
161 drvinfo->eedump_len = 0; 127 drvinfo->eedump_len = 0;
@@ -164,14 +130,25 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
164static void aq_ethtool_get_strings(struct net_device *ndev, 130static void aq_ethtool_get_strings(struct net_device *ndev,
165 u32 stringset, u8 *data) 131 u32 stringset, u8 *data)
166{ 132{
133 int i, si;
167 struct aq_nic_s *aq_nic = netdev_priv(ndev); 134 struct aq_nic_s *aq_nic = netdev_priv(ndev);
168 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); 135 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
169 136 u8 *p = data;
170 if (stringset == ETH_SS_STATS) 137
171 memcpy(data, *aq_ethtool_stat_names, 138 if (stringset == ETH_SS_STATS) {
172 sizeof(aq_ethtool_stat_names) - 139 memcpy(p, *aq_ethtool_stat_names,
173 (AQ_CFG_VECS_MAX - cfg->vecs) * 140 sizeof(aq_ethtool_stat_names));
174 aq_ethtool_stat_queue_chars); 141 p = p + sizeof(aq_ethtool_stat_names);
142 for (i = 0; i < cfg->vecs; i++) {
143 for (si = 0;
144 si < ARRAY_SIZE(aq_ethtool_queue_stat_names);
145 si++) {
146 snprintf(p, ETH_GSTRING_LEN,
147 aq_ethtool_queue_stat_names[si], i);
148 p += ETH_GSTRING_LEN;
149 }
150 }
151 }
175} 152}
176 153
177static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) 154static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
@@ -182,9 +159,8 @@ static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
182 159
183 switch (stringset) { 160 switch (stringset) {
184 case ETH_SS_STATS: 161 case ETH_SS_STATS:
185 ret = ARRAY_SIZE(aq_ethtool_stat_names) - 162 ret = ARRAY_SIZE(aq_ethtool_stat_names) +
186 (AQ_CFG_VECS_MAX - cfg->vecs) * 163 cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
187 aq_ethtool_stat_queue_lines;
188 break; 164 break;
189 default: 165 default:
190 ret = -EOPNOTSUPP; 166 ret = -EOPNOTSUPP;
@@ -245,6 +221,69 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
245 return err; 221 return err;
246} 222}
247 223
224int aq_ethtool_get_coalesce(struct net_device *ndev,
225 struct ethtool_coalesce *coal)
226{
227 struct aq_nic_s *aq_nic = netdev_priv(ndev);
228 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
229
230 if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON ||
231 cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) {
232 coal->rx_coalesce_usecs = cfg->rx_itr;
233 coal->tx_coalesce_usecs = cfg->tx_itr;
234 coal->rx_max_coalesced_frames = 0;
235 coal->tx_max_coalesced_frames = 0;
236 } else {
237 coal->rx_coalesce_usecs = 0;
238 coal->tx_coalesce_usecs = 0;
239 coal->rx_max_coalesced_frames = 1;
240 coal->tx_max_coalesced_frames = 1;
241 }
242 return 0;
243}
244
245int aq_ethtool_set_coalesce(struct net_device *ndev,
246 struct ethtool_coalesce *coal)
247{
248 struct aq_nic_s *aq_nic = netdev_priv(ndev);
249 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
250
251 /* This is not yet supported
252 */
253 if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce)
254 return -EOPNOTSUPP;
255
256 /* Atlantic only supports timing based coalescing
257 */
258 if (coal->rx_max_coalesced_frames > 1 ||
259 coal->rx_coalesce_usecs_irq ||
260 coal->rx_max_coalesced_frames_irq)
261 return -EOPNOTSUPP;
262
263 if (coal->tx_max_coalesced_frames > 1 ||
264 coal->tx_coalesce_usecs_irq ||
265 coal->tx_max_coalesced_frames_irq)
266 return -EOPNOTSUPP;
267
268 /* We do not support frame counting. Check this
269 */
270 if (!(coal->rx_max_coalesced_frames == !coal->rx_coalesce_usecs))
271 return -EOPNOTSUPP;
272 if (!(coal->tx_max_coalesced_frames == !coal->tx_coalesce_usecs))
273 return -EOPNOTSUPP;
274
275 if (coal->rx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX ||
276 coal->tx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX)
277 return -EINVAL;
278
279 cfg->itr = AQ_CFG_INTERRUPT_MODERATION_ON;
280
281 cfg->rx_itr = coal->rx_coalesce_usecs;
282 cfg->tx_itr = coal->tx_coalesce_usecs;
283
284 return aq_nic_update_interrupt_moderation_settings(aq_nic);
285}
286
248const struct ethtool_ops aq_ethtool_ops = { 287const struct ethtool_ops aq_ethtool_ops = {
249 .get_link = aq_ethtool_get_link, 288 .get_link = aq_ethtool_get_link,
250 .get_regs_len = aq_ethtool_get_regs_len, 289 .get_regs_len = aq_ethtool_get_regs_len,
@@ -259,4 +298,6 @@ const struct ethtool_ops aq_ethtool_ops = {
259 .get_ethtool_stats = aq_ethtool_stats, 298 .get_ethtool_stats = aq_ethtool_stats,
260 .get_link_ksettings = aq_ethtool_get_link_ksettings, 299 .get_link_ksettings = aq_ethtool_get_link_ksettings,
261 .set_link_ksettings = aq_ethtool_set_link_ksettings, 300 .set_link_ksettings = aq_ethtool_set_link_ksettings,
301 .get_coalesce = aq_ethtool_get_coalesce,
302 .set_coalesce = aq_ethtool_set_coalesce,
262}; 303};
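Instead of a hand-written table of five strings for each of exactly eight queues, the aQuantia ethtool code now keeps a handful of "%d" templates and expands them once per active queue, so get_sset_count(), get_strings() and the stats buffer all scale with cfg->vecs. A self-contained sketch of generating the string table that way; GSTRING_LEN and the template list are shortened stand-ins for ETH_GSTRING_LEN and the driver's array.

    #include <stdio.h>

    #define GSTRING_LEN 32  /* stand-in for ETH_GSTRING_LEN */

    static const char *queue_stat_names[] = {
            "Queue[%d] InPackets",
            "Queue[%d] OutPackets",
            "Queue[%d] InErrors",
    };

    #define N_TMPL (sizeof(queue_stat_names) / sizeof(queue_stat_names[0]))

    /* Expand every template once per queue into a flat string table. */
    static void fill_strings(char *p, int nqueues)
    {
            int i, si;

            for (i = 0; i < nqueues; i++)
                    for (si = 0; si < (int)N_TMPL; si++) {
                            snprintf(p, GSTRING_LEN, queue_stat_names[si], i);
                            p += GSTRING_LEN;
                    }
    }

    int main(void)
    {
            char buf[4 * N_TMPL * GSTRING_LEN];
            int q;

            fill_strings(buf, 4);
            for (q = 0; q < 4 * (int)N_TMPL; q++)
                    printf("%s\n", buf + q * GSTRING_LEN);
            return 0;
    }

The matching count is then simply fixed-stat-count plus queues times templates, which is exactly what the new get_sset_count() and get_drvinfo() report.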
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index bf9b3f020e10..0207927dc8a6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -151,8 +151,7 @@ struct aq_hw_ops {
151 [ETH_ALEN], 151 [ETH_ALEN],
152 u32 count); 152 u32 count);
153 153
154 int (*hw_interrupt_moderation_set)(struct aq_hw_s *self, 154 int (*hw_interrupt_moderation_set)(struct aq_hw_s *self);
155 bool itr_enabled);
156 155
157 int (*hw_rss_set)(struct aq_hw_s *self, 156 int (*hw_rss_set)(struct aq_hw_s *self,
158 struct aq_rss_parameters *rss_params); 157 struct aq_rss_parameters *rss_params);
@@ -163,6 +162,8 @@ struct aq_hw_ops {
163 int (*hw_get_regs)(struct aq_hw_s *self, 162 int (*hw_get_regs)(struct aq_hw_s *self,
164 struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); 163 struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
165 164
165 int (*hw_update_stats)(struct aq_hw_s *self);
166
166 int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, 167 int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
167 unsigned int *p_count); 168 unsigned int *p_count);
168 169
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 0a5bb4114eb4..483e97691eea 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -16,6 +16,7 @@
16#include "aq_pci_func.h" 16#include "aq_pci_func.h"
17#include "aq_nic_internal.h" 17#include "aq_nic_internal.h"
18 18
19#include <linux/moduleparam.h>
19#include <linux/netdevice.h> 20#include <linux/netdevice.h>
20#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
21#include <linux/timer.h> 22#include <linux/timer.h>
@@ -24,6 +25,18 @@
24#include <linux/tcp.h> 25#include <linux/tcp.h>
25#include <net/ip.h> 26#include <net/ip.h>
26 27
28static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
29module_param_named(aq_itr, aq_itr, uint, 0644);
30MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");
31
32static unsigned int aq_itr_tx;
33module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
34MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");
35
36static unsigned int aq_itr_rx;
37module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
38MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
39
27static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) 40static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
28{ 41{
29 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; 42 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -61,9 +74,9 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
61 74
62 cfg->is_polling = AQ_CFG_IS_POLLING_DEF; 75 cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
63 76
64 cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF; 77 cfg->itr = aq_itr;
65 cfg->itr = cfg->is_interrupt_moderation ? 78 cfg->tx_itr = aq_itr_tx;
66 AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U; 79 cfg->rx_itr = aq_itr_rx;
67 80
68 cfg->is_rss = AQ_CFG_IS_RSS_DEF; 81 cfg->is_rss = AQ_CFG_IS_RSS_DEF;
69 cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; 82 cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
@@ -126,10 +139,12 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
126 if (err) 139 if (err)
127 return err; 140 return err;
128 141
129 if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) 142 if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
130 pr_info("%s: link change old %d new %d\n", 143 pr_info("%s: link change old %d new %d\n",
131 AQ_CFG_DRV_NAME, self->link_status.mbps, 144 AQ_CFG_DRV_NAME, self->link_status.mbps,
132 self->aq_hw->aq_link_status.mbps); 145 self->aq_hw->aq_link_status.mbps);
146 aq_nic_update_interrupt_moderation_settings(self);
147 }
133 148
134 self->link_status = self->aq_hw->aq_link_status; 149 self->link_status = self->aq_hw->aq_link_status;
135 if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) { 150 if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
@@ -164,8 +179,8 @@ static void aq_nic_service_timer_cb(unsigned long param)
164 if (err) 179 if (err)
165 goto err_exit; 180 goto err_exit;
166 181
167 self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, 182 if (self->aq_hw_ops.hw_update_stats)
168 self->aq_nic_cfg.is_interrupt_moderation); 183 self->aq_hw_ops.hw_update_stats(self->aq_hw);
169 184
170 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); 185 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
171 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); 186 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
@@ -334,6 +349,7 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
334 } 349 }
335 if (netif_running(ndev)) 350 if (netif_running(ndev))
336 netif_tx_disable(ndev); 351 netif_tx_disable(ndev);
352 netif_carrier_off(self->ndev);
337 353
338 for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; 354 for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
339 self->aq_vecs++) { 355 self->aq_vecs++) {
@@ -421,9 +437,8 @@ int aq_nic_start(struct aq_nic_s *self)
421 if (err < 0) 437 if (err < 0)
422 goto err_exit; 438 goto err_exit;
423 439
424 err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, 440 err = aq_nic_update_interrupt_moderation_settings(self);
425 self->aq_nic_cfg.is_interrupt_moderation); 441 if (err)
426 if (err < 0)
427 goto err_exit; 442 goto err_exit;
428 setup_timer(&self->service_timer, &aq_nic_service_timer_cb, 443 setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
429 (unsigned long)self); 444 (unsigned long)self);
@@ -645,6 +660,11 @@ err_exit:
645 return err; 660 return err;
646} 661}
647 662
663int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
664{
665 return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw);
666}
667
648int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags) 668int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
649{ 669{
650 int err = 0; 670 int err = 0;
@@ -899,6 +919,7 @@ int aq_nic_stop(struct aq_nic_s *self)
899 unsigned int i = 0U; 919 unsigned int i = 0U;
900 920
901 netif_tx_disable(self->ndev); 921 netif_tx_disable(self->ndev);
922 netif_carrier_off(self->ndev);
902 923
903 del_timer_sync(&self->service_timer); 924 del_timer_sync(&self->service_timer);
904 925
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 0ddd556ff901..4309983acdd6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -40,6 +40,8 @@ struct aq_nic_cfg_s {
40 u32 vecs; /* vecs==allocated irqs */ 40 u32 vecs; /* vecs==allocated irqs */
41 u32 irq_type; 41 u32 irq_type;
42 u32 itr; 42 u32 itr;
43 u16 rx_itr;
44 u16 tx_itr;
43 u32 num_rss_queues; 45 u32 num_rss_queues;
44 u32 mtu; 46 u32 mtu;
45 u32 ucp_0x364; 47 u32 ucp_0x364;
@@ -49,7 +51,6 @@ struct aq_nic_cfg_s {
49 u16 is_mc_list_enabled; 51 u16 is_mc_list_enabled;
50 u16 mc_list_count; 52 u16 mc_list_count;
51 bool is_autoneg; 53 bool is_autoneg;
52 bool is_interrupt_moderation;
53 bool is_polling; 54 bool is_polling;
54 bool is_rss; 55 bool is_rss;
55 bool is_lro; 56 bool is_lro;
@@ -104,5 +105,6 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
104struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); 105struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
105u32 aq_nic_get_fw_version(struct aq_nic_s *self); 106u32 aq_nic_get_fw_version(struct aq_nic_s *self);
106int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); 107int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
108int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
107 109
108#endif /* AQ_NIC_H */ 110#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 4c6c882c6a1c..cadaa646c89f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -85,6 +85,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
85 int err = 0; 85 int err = 0;
86 unsigned int bar = 0U; 86 unsigned int bar = 0U;
87 unsigned int port = 0U; 87 unsigned int port = 0U;
88 unsigned int numvecs = 0U;
88 89
89 err = pci_enable_device(self->pdev); 90 err = pci_enable_device(self->pdev);
90 if (err < 0) 91 if (err < 0)
@@ -142,10 +143,12 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
142 } 143 }
143 } 144 }
144 145
145 /*enable interrupts */ 146 numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs);
147 numvecs = min(numvecs, num_online_cpus());
148
149 /* enable interrupts */
146#if !AQ_CFG_FORCE_LEGACY_INT 150#if !AQ_CFG_FORCE_LEGACY_INT
147 err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs, 151 err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX);
148 self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX);
149 152
150 if (err < 0) { 153 if (err < 0) {
151 err = pci_alloc_irq_vectors(self->pdev, 1, 1, 154 err = pci_alloc_irq_vectors(self->pdev, 1, 1,
@@ -153,7 +156,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
153 if (err < 0) 156 if (err < 0)
154 goto err_exit; 157 goto err_exit;
155 } 158 }
156#endif 159#endif /* AQ_CFG_FORCE_LEGACY_INT */
157 160
158 /* net device init */ 161 /* net device init */
159 for (port = 0; port < self->ports; ++port) { 162 for (port = 0; port < self->ports; ++port) {
@@ -265,6 +268,9 @@ void aq_pci_func_free(struct aq_pci_func_s *self)
265 aq_nic_ndev_free(self->port[port]); 268 aq_nic_ndev_free(self->port[port]);
266 } 269 }
267 270
271 if (self->mmio)
272 iounmap(self->mmio);
273
268 kfree(self); 274 kfree(self);
269 275
270err_exit:; 276err_exit:;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 305ff8ffac2c..5fecc9a099ef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -373,8 +373,11 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
373 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); 373 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
374 aq_vec_add_stats(self, &stats_rx, &stats_tx); 374 aq_vec_add_stats(self, &stats_rx, &stats_tx);
375 375
376 /* This data should mimic aq_ethtool_queue_stat_names structure
377 */
376 data[count] += stats_rx.packets; 378 data[count] += stats_rx.packets;
377 data[++count] += stats_tx.packets; 379 data[++count] += stats_tx.packets;
380 data[++count] += stats_tx.queue_restarts;
378 data[++count] += stats_rx.jumbo_packets; 381 data[++count] += stats_rx.jumbo_packets;
379 data[++count] += stats_rx.lro_packets; 382 data[++count] += stats_rx.lro_packets;
380 data[++count] += stats_rx.errors; 383 data[++count] += stats_rx.errors;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index c5a02df7a48b..07b3c49a16a4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -765,24 +765,23 @@ err_exit:
765 return err; 765 return err;
766} 766}
767 767
768static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, 768static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
769 bool itr_enabled)
770{ 769{
771 unsigned int i = 0U; 770 unsigned int i = 0U;
771 u32 itr_rx;
772 772
773 if (itr_enabled && self->aq_nic_cfg->itr) { 773 if (self->aq_nic_cfg->itr) {
774 if (self->aq_nic_cfg->itr != 0xFFFFU) { 774 if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
775 u32 itr_ = (self->aq_nic_cfg->itr >> 1); 775 u32 itr_ = (self->aq_nic_cfg->itr >> 1);
776 776
777 itr_ = min(AQ_CFG_IRQ_MASK, itr_); 777 itr_ = min(AQ_CFG_IRQ_MASK, itr_);
778 778
779 PHAL_ATLANTIC_A0->itr_rx = 0x80000000U | 779 itr_rx = 0x80000000U | (itr_ << 0x10);
780 (itr_ << 0x10);
781 } else { 780 } else {
782 u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U); 781 u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);
783 782
784 if (n < self->aq_link_status.mbps) { 783 if (n < self->aq_link_status.mbps) {
785 PHAL_ATLANTIC_A0->itr_rx = 0U; 784 itr_rx = 0U;
786 } else { 785 } else {
787 static unsigned int hw_timers_tbl_[] = { 786 static unsigned int hw_timers_tbl_[] = {
788 0x01CU, /* 10Gbit */ 787 0x01CU, /* 10Gbit */
@@ -797,8 +796,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
797 hw_atl_utils_mbps_2_speed_index( 796 hw_atl_utils_mbps_2_speed_index(
798 self->aq_link_status.mbps); 797 self->aq_link_status.mbps);
799 798
800 PHAL_ATLANTIC_A0->itr_rx = 799 itr_rx = 0x80000000U |
801 0x80000000U |
802 (hw_timers_tbl_[speed_index] << 0x10U); 800 (hw_timers_tbl_[speed_index] << 0x10U);
803 } 801 }
804 802
@@ -806,11 +804,11 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
806 aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U); 804 aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
807 } 805 }
808 } else { 806 } else {
809 PHAL_ATLANTIC_A0->itr_rx = 0U; 807 itr_rx = 0U;
810 } 808 }
811 809
812 for (i = HW_ATL_A0_RINGS_MAX; i--;) 810 for (i = HW_ATL_A0_RINGS_MAX; i--;)
813 reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i); 811 reg_irq_thr_set(self, itr_rx, i);
814 812
815 return aq_hw_err_from_flags(self); 813 return aq_hw_err_from_flags(self);
816} 814}
@@ -885,6 +883,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
885 .hw_rss_set = hw_atl_a0_hw_rss_set, 883 .hw_rss_set = hw_atl_a0_hw_rss_set,
886 .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set, 884 .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
887 .hw_get_regs = hw_atl_utils_hw_get_regs, 885 .hw_get_regs = hw_atl_utils_hw_get_regs,
886 .hw_update_stats = hw_atl_utils_update_stats,
888 .hw_get_hw_stats = hw_atl_utils_get_hw_stats, 887 .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
889 .hw_get_fw_version = hw_atl_utils_get_fw_version, 888 .hw_get_fw_version = hw_atl_utils_get_fw_version,
890}; 889};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 21784cc39dab..ec68c20efcbd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -788,39 +788,45 @@ err_exit:
788 return err; 788 return err;
789} 789}
790 790
791static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self, 791static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
792 bool itr_enabled)
793{ 792{
794 unsigned int i = 0U; 793 unsigned int i = 0U;
794 u32 itr_tx = 2U;
795 u32 itr_rx = 2U;
795 796
796 if (itr_enabled && self->aq_nic_cfg->itr) { 797 switch (self->aq_nic_cfg->itr) {
798 case AQ_CFG_INTERRUPT_MODERATION_ON:
799 case AQ_CFG_INTERRUPT_MODERATION_AUTO:
797 tdm_tx_desc_wr_wb_irq_en_set(self, 0U); 800 tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
798 tdm_tdm_intr_moder_en_set(self, 1U); 801 tdm_tdm_intr_moder_en_set(self, 1U);
799 rdm_rx_desc_wr_wb_irq_en_set(self, 0U); 802 rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
800 rdm_rdm_intr_moder_en_set(self, 1U); 803 rdm_rdm_intr_moder_en_set(self, 1U);
801 804
802 PHAL_ATLANTIC_B0->itr_tx = 2U; 805 if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
803 PHAL_ATLANTIC_B0->itr_rx = 2U; 806 /* HW timers are in 2us units */
807 int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
808 int tx_min_timer = tx_max_timer / 2;
804 809
805 if (self->aq_nic_cfg->itr != 0xFFFFU) { 810 int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
806 unsigned int max_timer = self->aq_nic_cfg->itr / 2U; 811 int rx_min_timer = rx_max_timer / 2;
807 unsigned int min_timer = self->aq_nic_cfg->itr / 32U;
808 812
809 max_timer = min(0x1FFU, max_timer); 813 tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
810 min_timer = min(0xFFU, min_timer); 814 tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
815 rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
816 rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);
811 817
812 PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U; 818 itr_tx |= tx_min_timer << 0x8U;
813 PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U; 819 itr_tx |= tx_max_timer << 0x10U;
814 PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U; 820 itr_rx |= rx_min_timer << 0x8U;
815 PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U; 821 itr_rx |= rx_max_timer << 0x10U;
816 } else { 822 } else {
817 static unsigned int hw_atl_b0_timers_table_tx_[][2] = { 823 static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
818 {0xffU, 0xffU}, /* 10Gbit */ 824 {0xfU, 0xffU}, /* 10Gbit */
819 {0xffU, 0x1ffU}, /* 5Gbit */ 825 {0xfU, 0x1ffU}, /* 5Gbit */
820 {0xffU, 0x1ffU}, /* 5Gbit 5GS */ 826 {0xfU, 0x1ffU}, /* 5Gbit 5GS */
821 {0xffU, 0x1ffU}, /* 2.5Gbit */ 827 {0xfU, 0x1ffU}, /* 2.5Gbit */
822 {0xffU, 0x1ffU}, /* 1Gbit */ 828 {0xfU, 0x1ffU}, /* 1Gbit */
823 {0xffU, 0x1ffU}, /* 100Mbit */ 829 {0xfU, 0x1ffU}, /* 100Mbit */
824 }; 830 };
825 831
826 static unsigned int hw_atl_b0_timers_table_rx_[][2] = { 832 static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
@@ -836,34 +842,36 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
836 hw_atl_utils_mbps_2_speed_index( 842 hw_atl_utils_mbps_2_speed_index(
837 self->aq_link_status.mbps); 843 self->aq_link_status.mbps);
838 844
839 PHAL_ATLANTIC_B0->itr_tx |= 845 /* Update user visible ITR settings */
840 hw_atl_b0_timers_table_tx_[speed_index] 846 self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
841 [0] << 0x8U; /* set min timer value */ 847 [speed_index][1] * 2;
842 PHAL_ATLANTIC_B0->itr_tx |= 848 self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
843 hw_atl_b0_timers_table_tx_[speed_index] 849 [speed_index][1] * 2;
844 [1] << 0x10U; /* set max timer value */ 850
845 851 itr_tx |= hw_atl_b0_timers_table_tx_
846 PHAL_ATLANTIC_B0->itr_rx |= 852 [speed_index][0] << 0x8U;
847 hw_atl_b0_timers_table_rx_[speed_index] 853 itr_tx |= hw_atl_b0_timers_table_tx_
848 [0] << 0x8U; /* set min timer value */ 854 [speed_index][1] << 0x10U;
849 PHAL_ATLANTIC_B0->itr_rx |= 855
850 hw_atl_b0_timers_table_rx_[speed_index] 856 itr_rx |= hw_atl_b0_timers_table_rx_
851 [1] << 0x10U; /* set max timer value */ 857 [speed_index][0] << 0x8U;
858 itr_rx |= hw_atl_b0_timers_table_rx_
859 [speed_index][1] << 0x10U;
852 } 860 }
853 } else { 861 break;
862 case AQ_CFG_INTERRUPT_MODERATION_OFF:
854 tdm_tx_desc_wr_wb_irq_en_set(self, 1U); 863 tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
855 tdm_tdm_intr_moder_en_set(self, 0U); 864 tdm_tdm_intr_moder_en_set(self, 0U);
856 rdm_rx_desc_wr_wb_irq_en_set(self, 1U); 865 rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
857 rdm_rdm_intr_moder_en_set(self, 0U); 866 rdm_rdm_intr_moder_en_set(self, 0U);
858 PHAL_ATLANTIC_B0->itr_tx = 0U; 867 itr_tx = 0U;
859 PHAL_ATLANTIC_B0->itr_rx = 0U; 868 itr_rx = 0U;
869 break;
860 } 870 }
861 871
862 for (i = HW_ATL_B0_RINGS_MAX; i--;) { 872 for (i = HW_ATL_B0_RINGS_MAX; i--;) {
863 reg_tx_intr_moder_ctrl_set(self, 873 reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
864 PHAL_ATLANTIC_B0->itr_tx, i); 874 reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
865 reg_rx_intr_moder_ctrl_set(self,
866 PHAL_ATLANTIC_B0->itr_rx, i);
867 } 875 }
868 876
869 return aq_hw_err_from_flags(self); 877 return aq_hw_err_from_flags(self);
@@ -939,6 +947,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
939 .hw_rss_set = hw_atl_b0_hw_rss_set, 947 .hw_rss_set = hw_atl_b0_hw_rss_set,
940 .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, 948 .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
941 .hw_get_regs = hw_atl_utils_hw_get_regs, 949 .hw_get_regs = hw_atl_utils_hw_get_regs,
950 .hw_update_stats = hw_atl_utils_update_stats,
942 .hw_get_hw_stats = hw_atl_utils_get_hw_stats, 951 .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
943 .hw_get_fw_version = hw_atl_utils_get_fw_version, 952 .hw_get_fw_version = hw_atl_utils_get_fw_version,
944}; 953};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index fcf89e25a773..9aa2c6edfca2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -139,6 +139,9 @@
139 139
140#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U 140#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U
141 141
142#define HW_ATL_INTR_MODER_MAX 0x1FF
143#define HW_ATL_INTR_MODER_MIN 0xFF
144
142/* Hardware tx descriptor */ 145/* Hardware tx descriptor */
143struct __packed hw_atl_txd_s { 146struct __packed hw_atl_txd_s {
144 u64 buf_addr; 147 u64 buf_addr;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index bf734b32e44b..1fe016fc4bc7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -255,6 +255,15 @@ err_exit:
255 return err; 255 return err;
256} 256}
257 257
258int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
259 struct hw_aq_atl_utils_mbox_header *pmbox)
260{
261 return hw_atl_utils_fw_downld_dwords(self,
262 PHAL_ATLANTIC->mbox_addr,
263 (u32 *)(void *)pmbox,
264 sizeof(*pmbox) / sizeof(u32));
265}
266
258void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, 267void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
259 struct hw_aq_atl_utils_mbox *pmbox) 268 struct hw_aq_atl_utils_mbox *pmbox)
260{ 269{
@@ -267,9 +276,6 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
267 if (err < 0) 276 if (err < 0)
268 goto err_exit; 277 goto err_exit;
269 278
270 if (pmbox != &PHAL_ATLANTIC->mbox)
271 memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox));
272
273 if (IS_CHIP_FEATURE(REVISION_A0)) { 279 if (IS_CHIP_FEATURE(REVISION_A0)) {
274 unsigned int mtu = self->aq_nic_cfg ? 280 unsigned int mtu = self->aq_nic_cfg ?
275 self->aq_nic_cfg->mtu : 1514U; 281 self->aq_nic_cfg->mtu : 1514U;
@@ -299,17 +305,17 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
299{ 305{
300 int err = 0; 306 int err = 0;
301 u32 transaction_id = 0; 307 u32 transaction_id = 0;
308 struct hw_aq_atl_utils_mbox_header mbox;
302 309
303 if (state == MPI_RESET) { 310 if (state == MPI_RESET) {
304 hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); 311 hw_atl_utils_mpi_read_mbox(self, &mbox);
305 312
306 transaction_id = PHAL_ATLANTIC->mbox.transaction_id; 313 transaction_id = mbox.transaction_id;
307 314
308 AQ_HW_WAIT_FOR(transaction_id != 315 AQ_HW_WAIT_FOR(transaction_id !=
309 (hw_atl_utils_mpi_read_stats 316 (hw_atl_utils_mpi_read_mbox(self, &mbox),
310 (self, &PHAL_ATLANTIC->mbox), 317 mbox.transaction_id),
311 PHAL_ATLANTIC->mbox.transaction_id), 318 1000U, 100U);
312 1000U, 100U);
313 if (err < 0) 319 if (err < 0)
314 goto err_exit; 320 goto err_exit;
315 } 321 }
@@ -492,16 +498,51 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
492 return 0; 498 return 0;
493} 499}
494 500
501int hw_atl_utils_update_stats(struct aq_hw_s *self)
502{
503 struct hw_atl_s *hw_self = PHAL_ATLANTIC;
504 struct hw_aq_atl_utils_mbox mbox;
505
506 if (!self->aq_link_status.mbps)
507 return 0;
508
509 hw_atl_utils_mpi_read_stats(self, &mbox);
510
511#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
512 mbox.stats._N_ - hw_self->last_stats._N_)
513
514 AQ_SDELTA(uprc);
515 AQ_SDELTA(mprc);
516 AQ_SDELTA(bprc);
517 AQ_SDELTA(erpt);
518
519 AQ_SDELTA(uptc);
520 AQ_SDELTA(mptc);
521 AQ_SDELTA(bptc);
522 AQ_SDELTA(erpr);
523
524 AQ_SDELTA(ubrc);
525 AQ_SDELTA(ubtc);
526 AQ_SDELTA(mbrc);
527 AQ_SDELTA(mbtc);
528 AQ_SDELTA(bbrc);
529 AQ_SDELTA(bbtc);
530 AQ_SDELTA(dpc);
531
532#undef AQ_SDELTA
533
534 memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
535
536 return 0;
537}
538
495int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 539int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
496 u64 *data, unsigned int *p_count) 540 u64 *data, unsigned int *p_count)
497{ 541{
498 struct hw_atl_stats_s *stats = NULL; 542 struct hw_atl_s *hw_self = PHAL_ATLANTIC;
543 struct hw_atl_stats_s *stats = &hw_self->curr_stats;
499 int i = 0; 544 int i = 0;
500 545
501 hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
502
503 stats = &PHAL_ATLANTIC->mbox.stats;
504
505 data[i] = stats->uprc + stats->mprc + stats->bprc; 546 data[i] = stats->uprc + stats->mprc + stats->bprc;
506 data[++i] = stats->uprc; 547 data[++i] = stats->uprc;
507 data[++i] = stats->mprc; 548 data[++i] = stats->mprc;
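hw_atl_utils_update_stats() above folds each new firmware mailbox snapshot into a running total: for every counter it adds (current reading - previous reading) to curr_stats via the AQ_SDELTA() macro, stores the reading as last_stats, and skips the whole update while the link is down. The short user-space sketch below shows the same delta-accumulation idea with generic names; the unsigned subtraction also produces the right delta across a single wrap of the raw counter.

#include <stdint.h>
#include <stdio.h>

struct counter_acc {
	uint32_t last;   /* previous raw counter reading             */
	uint64_t total;  /* accumulated, monotonically growing value */
};

/* Add the delta since the last reading, then remember the reading. */
static void counter_acc_update(struct counter_acc *acc, uint32_t raw)
{
	acc->total += (uint32_t)(raw - acc->last);
	acc->last = raw;
}

int main(void)
{
	struct counter_acc rx_pkts = { 0, 0 };

	counter_acc_update(&rx_pkts, 1000);   /* +1000 */
	counter_acc_update(&rx_pkts, 1500);   /* +500  */
	counter_acc_update(&rx_pkts, 1600);   /* +100  */
	printf("%llu\n", (unsigned long long)rx_pkts.total);   /* 2600 */
	return 0;
}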
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index e0360a6b2202..c99cc690e425 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -115,19 +115,22 @@ struct __packed hw_aq_atl_utils_fw_rpc {
115 }; 115 };
116}; 116};
117 117
118struct __packed hw_aq_atl_utils_mbox { 118struct __packed hw_aq_atl_utils_mbox_header {
119 u32 version; 119 u32 version;
120 u32 transaction_id; 120 u32 transaction_id;
121 int error; 121 u32 error;
122};
123
124struct __packed hw_aq_atl_utils_mbox {
125 struct hw_aq_atl_utils_mbox_header header;
122 struct hw_atl_stats_s stats; 126 struct hw_atl_stats_s stats;
123}; 127};
124 128
125struct __packed hw_atl_s { 129struct __packed hw_atl_s {
126 struct aq_hw_s base; 130 struct aq_hw_s base;
127 struct hw_aq_atl_utils_mbox mbox; 131 struct hw_atl_stats_s last_stats;
132 struct hw_atl_stats_s curr_stats;
128 u64 speed; 133 u64 speed;
129 u32 itr_tx;
130 u32 itr_rx;
131 unsigned int chip_features; 134 unsigned int chip_features;
132 u32 fw_ver_actual; 135 u32 fw_ver_actual;
133 atomic_t dpc; 136 atomic_t dpc;
@@ -170,6 +173,9 @@ enum hal_atl_utils_fw_state_e {
170 173
171void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); 174void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
172 175
176int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
177 struct hw_aq_atl_utils_mbox_header *pmbox);
178
173void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, 179void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
174 struct hw_aq_atl_utils_mbox *pmbox); 180 struct hw_aq_atl_utils_mbox *pmbox);
175 181
@@ -199,6 +205,8 @@ int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
199 205
200int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); 206int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
201 207
208int hw_atl_utils_update_stats(struct aq_hw_s *self);
209
202int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 210int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
203 u64 *data, 211 u64 *data,
204 unsigned int *p_count); 212 unsigned int *p_count);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index aacec8bc19d5..dc5de275352a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -214,6 +214,8 @@ static const u16 bnxt_async_events_arr[] = {
214 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 214 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
215}; 215};
216 216
217static struct workqueue_struct *bnxt_pf_wq;
218
217static bool bnxt_vf_pciid(enum board_idx idx) 219static bool bnxt_vf_pciid(enum board_idx idx)
218{ 220{
219 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF); 221 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
@@ -1024,12 +1026,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
1024 return 0; 1026 return 0;
1025} 1027}
1026 1028
1029static void bnxt_queue_sp_work(struct bnxt *bp)
1030{
1031 if (BNXT_PF(bp))
1032 queue_work(bnxt_pf_wq, &bp->sp_task);
1033 else
1034 schedule_work(&bp->sp_task);
1035}
1036
1037static void bnxt_cancel_sp_work(struct bnxt *bp)
1038{
1039 if (BNXT_PF(bp))
1040 flush_workqueue(bnxt_pf_wq);
1041 else
1042 cancel_work_sync(&bp->sp_task);
1043}
1044
1027static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 1045static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1028{ 1046{
1029 if (!rxr->bnapi->in_reset) { 1047 if (!rxr->bnapi->in_reset) {
1030 rxr->bnapi->in_reset = true; 1048 rxr->bnapi->in_reset = true;
1031 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 1049 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1032 schedule_work(&bp->sp_task); 1050 bnxt_queue_sp_work(bp);
1033 } 1051 }
1034 rxr->rx_next_cons = 0xffff; 1052 rxr->rx_next_cons = 0xffff;
1035} 1053}
@@ -1717,7 +1735,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
1717 default: 1735 default:
1718 goto async_event_process_exit; 1736 goto async_event_process_exit;
1719 } 1737 }
1720 schedule_work(&bp->sp_task); 1738 bnxt_queue_sp_work(bp);
1721async_event_process_exit: 1739async_event_process_exit:
1722 bnxt_ulp_async_events(bp, cmpl); 1740 bnxt_ulp_async_events(bp, cmpl);
1723 return 0; 1741 return 0;
@@ -1751,7 +1769,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1751 1769
1752 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 1770 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1753 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); 1771 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1754 schedule_work(&bp->sp_task); 1772 bnxt_queue_sp_work(bp);
1755 break; 1773 break;
1756 1774
1757 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 1775 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -3448,6 +3466,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3448 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); 3466 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
3449} 3467}
3450 3468
3469int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3470 int timeout)
3471{
3472 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3473}
3474
3451int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 3475int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3452{ 3476{
3453 int rc; 3477 int rc;
@@ -6327,7 +6351,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6327 } 6351 }
6328 6352
6329 if (link_re_init) { 6353 if (link_re_init) {
6354 mutex_lock(&bp->link_lock);
6330 rc = bnxt_update_phy_setting(bp); 6355 rc = bnxt_update_phy_setting(bp);
6356 mutex_unlock(&bp->link_lock);
6331 if (rc) 6357 if (rc)
6332 netdev_warn(bp->dev, "failed to update phy settings\n"); 6358 netdev_warn(bp->dev, "failed to update phy settings\n");
6333 } 6359 }
@@ -6647,7 +6673,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
6647 vnic->rx_mask = mask; 6673 vnic->rx_mask = mask;
6648 6674
6649 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); 6675 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
6650 schedule_work(&bp->sp_task); 6676 bnxt_queue_sp_work(bp);
6651 } 6677 }
6652} 6678}
6653 6679
@@ -6920,7 +6946,7 @@ static void bnxt_tx_timeout(struct net_device *dev)
6920 6946
6921 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 6947 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
6922 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 6948 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
6923 schedule_work(&bp->sp_task); 6949 bnxt_queue_sp_work(bp);
6924} 6950}
6925 6951
6926#ifdef CONFIG_NET_POLL_CONTROLLER 6952#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6952,7 +6978,7 @@ static void bnxt_timer(unsigned long data)
6952 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && 6978 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
6953 bp->stats_coal_ticks) { 6979 bp->stats_coal_ticks) {
6954 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 6980 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
6955 schedule_work(&bp->sp_task); 6981 bnxt_queue_sp_work(bp);
6956 } 6982 }
6957bnxt_restart_timer: 6983bnxt_restart_timer:
6958 mod_timer(&bp->timer, jiffies + bp->current_interval); 6984 mod_timer(&bp->timer, jiffies + bp->current_interval);
@@ -7025,30 +7051,28 @@ static void bnxt_sp_task(struct work_struct *work)
7025 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) 7051 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
7026 bnxt_hwrm_port_qstats(bp); 7052 bnxt_hwrm_port_qstats(bp);
7027 7053
7028 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
7029 * must be the last functions to be called before exiting.
7030 */
7031 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 7054 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
7032 int rc = 0; 7055 int rc;
7033 7056
7057 mutex_lock(&bp->link_lock);
7034 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 7058 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
7035 &bp->sp_event)) 7059 &bp->sp_event))
7036 bnxt_hwrm_phy_qcaps(bp); 7060 bnxt_hwrm_phy_qcaps(bp);
7037 7061
7038 bnxt_rtnl_lock_sp(bp); 7062 rc = bnxt_update_link(bp, true);
7039 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 7063 mutex_unlock(&bp->link_lock);
7040 rc = bnxt_update_link(bp, true);
7041 bnxt_rtnl_unlock_sp(bp);
7042 if (rc) 7064 if (rc)
7043 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 7065 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
7044 rc); 7066 rc);
7045 } 7067 }
7046 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 7068 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
7047 bnxt_rtnl_lock_sp(bp); 7069 mutex_lock(&bp->link_lock);
7048 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 7070 bnxt_get_port_module_status(bp);
7049 bnxt_get_port_module_status(bp); 7071 mutex_unlock(&bp->link_lock);
7050 bnxt_rtnl_unlock_sp(bp);
7051 } 7072 }
7073 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
7074 * must be the last functions to be called before exiting.
7075 */
7052 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 7076 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
7053 bnxt_reset(bp, false); 7077 bnxt_reset(bp, false);
7054 7078
@@ -7433,7 +7457,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
7433 spin_unlock_bh(&bp->ntp_fltr_lock); 7457 spin_unlock_bh(&bp->ntp_fltr_lock);
7434 7458
7435 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 7459 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
7436 schedule_work(&bp->sp_task); 7460 bnxt_queue_sp_work(bp);
7437 7461
7438 return new_fltr->sw_id; 7462 return new_fltr->sw_id;
7439 7463
@@ -7516,7 +7540,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
7516 if (bp->vxlan_port_cnt == 1) { 7540 if (bp->vxlan_port_cnt == 1) {
7517 bp->vxlan_port = ti->port; 7541 bp->vxlan_port = ti->port;
7518 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 7542 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
7519 schedule_work(&bp->sp_task); 7543 bnxt_queue_sp_work(bp);
7520 } 7544 }
7521 break; 7545 break;
7522 case UDP_TUNNEL_TYPE_GENEVE: 7546 case UDP_TUNNEL_TYPE_GENEVE:
@@ -7533,7 +7557,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
7533 return; 7557 return;
7534 } 7558 }
7535 7559
7536 schedule_work(&bp->sp_task); 7560 bnxt_queue_sp_work(bp);
7537} 7561}
7538 7562
7539static void bnxt_udp_tunnel_del(struct net_device *dev, 7563static void bnxt_udp_tunnel_del(struct net_device *dev,
@@ -7572,7 +7596,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
7572 return; 7596 return;
7573 } 7597 }
7574 7598
7575 schedule_work(&bp->sp_task); 7599 bnxt_queue_sp_work(bp);
7576} 7600}
7577 7601
7578static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 7602static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -7720,7 +7744,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
7720 pci_disable_pcie_error_reporting(pdev); 7744 pci_disable_pcie_error_reporting(pdev);
7721 unregister_netdev(dev); 7745 unregister_netdev(dev);
7722 bnxt_shutdown_tc(bp); 7746 bnxt_shutdown_tc(bp);
7723 cancel_work_sync(&bp->sp_task); 7747 bnxt_cancel_sp_work(bp);
7724 bp->sp_event = 0; 7748 bp->sp_event = 0;
7725 7749
7726 bnxt_clear_int_mode(bp); 7750 bnxt_clear_int_mode(bp);
@@ -7748,6 +7772,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
7748 rc); 7772 rc);
7749 return rc; 7773 return rc;
7750 } 7774 }
7775 mutex_init(&bp->link_lock);
7751 7776
7752 rc = bnxt_update_link(bp, false); 7777 rc = bnxt_update_link(bp, false);
7753 if (rc) { 7778 if (rc) {
@@ -7946,7 +7971,7 @@ static void bnxt_parse_log_pcie_link(struct bnxt *bp)
7946 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; 7971 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
7947 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; 7972 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
7948 7973
7949 if (pcie_get_minimum_link(bp->pdev, &speed, &width) || 7974 if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
7950 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) 7975 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
7951 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n"); 7976 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
7952 else 7977 else
@@ -8138,8 +8163,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8138 else 8163 else
8139 device_set_wakeup_capable(&pdev->dev, false); 8164 device_set_wakeup_capable(&pdev->dev, false);
8140 8165
8141 if (BNXT_PF(bp)) 8166 if (BNXT_PF(bp)) {
8167 if (!bnxt_pf_wq) {
8168 bnxt_pf_wq =
8169 create_singlethread_workqueue("bnxt_pf_wq");
8170 if (!bnxt_pf_wq) {
8171 dev_err(&pdev->dev, "Unable to create workqueue.\n");
8172 goto init_err_pci_clean;
8173 }
8174 }
8142 bnxt_init_tc(bp); 8175 bnxt_init_tc(bp);
8176 }
8143 8177
8144 rc = register_netdev(dev); 8178 rc = register_netdev(dev);
8145 if (rc) 8179 if (rc)
@@ -8375,4 +8409,17 @@ static struct pci_driver bnxt_pci_driver = {
8375#endif 8409#endif
8376}; 8410};
8377 8411
8378module_pci_driver(bnxt_pci_driver); 8412static int __init bnxt_init(void)
8413{
8414 return pci_register_driver(&bnxt_pci_driver);
8415}
8416
8417static void __exit bnxt_exit(void)
8418{
8419 pci_unregister_driver(&bnxt_pci_driver);
8420 if (bnxt_pf_wq)
8421 destroy_workqueue(bnxt_pf_wq);
8422}
8423
8424module_init(bnxt_init);
8425module_exit(bnxt_exit);
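The bnxt changes above stop using the system workqueue for PF service-task work: bnxt_queue_sp_work() queues onto a dedicated single-threaded workqueue (created on the first PF probe, destroyed in the new module exit path), and bnxt_cancel_sp_work() flushes that queue instead of cancelling the work item. A minimal self-contained module sketch of the same pattern follows; names such as demo_wq and demo_work_fn are illustrative only, and the queue is created at module init here purely for brevity.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo: ran on the dedicated workqueue\n");
}

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);   /* instead of schedule_work(&demo_work) */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_workqueue(demo_wq);   /* let queued items finish */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");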
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 7b888d4b2b55..c911e69ff25f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1290,6 +1290,10 @@ struct bnxt {
1290 unsigned long *ntp_fltr_bmap; 1290 unsigned long *ntp_fltr_bmap;
1291 int ntp_fltr_count; 1291 int ntp_fltr_count;
1292 1292
1293 /* To protect link related settings during link changes and
1294 * ethtool settings changes.
1295 */
1296 struct mutex link_lock;
1293 struct bnxt_link_info link_info; 1297 struct bnxt_link_info link_info;
1294 struct ethtool_eee eee; 1298 struct ethtool_eee eee;
1295 u32 lpi_tmr_lo; 1299 u32 lpi_tmr_lo;
@@ -1358,6 +1362,7 @@ void bnxt_set_ring_params(struct bnxt *);
1358int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); 1362int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
1359void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); 1363void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
1360int _hwrm_send_message(struct bnxt *, void *, u32, int); 1364int _hwrm_send_message(struct bnxt *, void *, u32, int);
1365int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
1361int hwrm_send_message(struct bnxt *, void *, u32, int); 1366int hwrm_send_message(struct bnxt *, void *, u32, int);
1362int hwrm_send_message_silent(struct bnxt *, void *, u32, int); 1367int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
1363int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, 1368int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index aa1f3a2c7a78..fed37cd9ae1d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
50 50
51 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); 51 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
52 req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 52 req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
53 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 53
54 mutex_lock(&bp->hwrm_cmd_lock);
55 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
54 if (!rc) { 56 if (!rc) {
55 u8 *pri2cos = &resp->pri0_cos_queue_id; 57 u8 *pri2cos = &resp->pri0_cos_queue_id;
56 int i, j; 58 int i, j;
@@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
66 } 68 }
67 } 69 }
68 } 70 }
71 mutex_unlock(&bp->hwrm_cmd_lock);
69 return rc; 72 return rc;
70} 73}
71 74
@@ -119,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
119 int rc, i; 122 int rc, i;
120 123
121 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1); 124 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
122 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 125
123 if (rc) 126 mutex_lock(&bp->hwrm_cmd_lock);
127 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
128 if (rc) {
129 mutex_unlock(&bp->hwrm_cmd_lock);
124 return rc; 130 return rc;
131 }
125 132
126 data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); 133 data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
127 for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) { 134 for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
@@ -143,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
143 } 150 }
144 } 151 }
145 } 152 }
153 mutex_unlock(&bp->hwrm_cmd_lock);
146 return 0; 154 return 0;
147} 155}
148 156
@@ -240,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
240 int rc; 248 int rc;
241 249
242 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1); 250 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
243 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 251
244 if (rc) 252 mutex_lock(&bp->hwrm_cmd_lock);
253 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
254 if (rc) {
255 mutex_unlock(&bp->hwrm_cmd_lock);
245 return rc; 256 return rc;
257 }
246 258
247 pri_mask = le32_to_cpu(resp->flags); 259 pri_mask = le32_to_cpu(resp->flags);
248 pfc->pfc_en = pri_mask; 260 pfc->pfc_en = pri_mask;
261 mutex_unlock(&bp->hwrm_cmd_lock);
249 return 0; 262 return 0;
250} 263}
251 264
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8eff05a3e0e4..3cbe771b3352 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1052,6 +1052,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
1052 u32 ethtool_speed; 1052 u32 ethtool_speed;
1053 1053
1054 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); 1054 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
1055 mutex_lock(&bp->link_lock);
1055 bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); 1056 bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
1056 1057
1057 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); 1058 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
@@ -1099,6 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
1099 base->port = PORT_FIBRE; 1100 base->port = PORT_FIBRE;
1100 } 1101 }
1101 base->phy_address = link_info->phy_addr; 1102 base->phy_address = link_info->phy_addr;
1103 mutex_unlock(&bp->link_lock);
1102 1104
1103 return 0; 1105 return 0;
1104} 1106}
@@ -1190,6 +1192,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
1190 if (!BNXT_SINGLE_PF(bp)) 1192 if (!BNXT_SINGLE_PF(bp))
1191 return -EOPNOTSUPP; 1193 return -EOPNOTSUPP;
1192 1194
1195 mutex_lock(&bp->link_lock);
1193 if (base->autoneg == AUTONEG_ENABLE) { 1196 if (base->autoneg == AUTONEG_ENABLE) {
1194 BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, 1197 BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
1195 advertising); 1198 advertising);
@@ -1234,6 +1237,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
1234 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); 1237 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
1235 1238
1236set_setting_exit: 1239set_setting_exit:
1240 mutex_unlock(&bp->link_lock);
1237 return rc; 1241 return rc;
1238} 1242}
1239 1243
@@ -1805,7 +1809,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1805 req.dir_ordinal = cpu_to_le16(ordinal); 1809 req.dir_ordinal = cpu_to_le16(ordinal);
1806 req.dir_ext = cpu_to_le16(ext); 1810 req.dir_ext = cpu_to_le16(ext);
1807 req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; 1811 req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
1808 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 1812 mutex_lock(&bp->hwrm_cmd_lock);
1813 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1809 if (rc == 0) { 1814 if (rc == 0) {
1810 if (index) 1815 if (index)
1811 *index = le16_to_cpu(output->dir_idx); 1816 *index = le16_to_cpu(output->dir_idx);
@@ -1814,6 +1819,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1814 if (data_length) 1819 if (data_length)
1815 *data_length = le32_to_cpu(output->dir_data_length); 1820 *data_length = le32_to_cpu(output->dir_data_length);
1816 } 1821 }
1822 mutex_unlock(&bp->hwrm_cmd_lock);
1817 return rc; 1823 return rc;
1818} 1824}
1819 1825
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index d37925a8a65b..5ee18660bc33 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -502,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
502 int rc = 0, vfs_supported; 502 int rc = 0, vfs_supported;
503 int min_rx_rings, min_tx_rings, min_rss_ctxs; 503 int min_rx_rings, min_tx_rings, min_rss_ctxs;
504 int tx_ok = 0, rx_ok = 0, rss_ok = 0; 504 int tx_ok = 0, rx_ok = 0, rss_ok = 0;
505 int avail_cp, avail_stat;
505 506
506 /* Check if we can enable requested num of vf's. At a mininum 507 /* Check if we can enable requested num of vf's. At a mininum
507 * we require 1 RX 1 TX rings for each VF. In this minimum conf 508 * we require 1 RX 1 TX rings for each VF. In this minimum conf
@@ -509,6 +510,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
509 */ 510 */
510 vfs_supported = *num_vfs; 511 vfs_supported = *num_vfs;
511 512
513 avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
514 avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
515 avail_cp = min_t(int, avail_cp, avail_stat);
516
512 while (vfs_supported) { 517 while (vfs_supported) {
513 min_rx_rings = vfs_supported; 518 min_rx_rings = vfs_supported;
514 min_tx_rings = vfs_supported; 519 min_tx_rings = vfs_supported;
@@ -523,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
523 min_rx_rings) 528 min_rx_rings)
524 rx_ok = 1; 529 rx_ok = 1;
525 } 530 }
526 if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings) 531 if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
532 avail_cp < min_rx_rings)
527 rx_ok = 0; 533 rx_ok = 0;
528 534
529 if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) 535 if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
536 avail_cp >= min_tx_rings)
530 tx_ok = 1; 537 tx_ok = 1;
531 538
532 if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) 539 if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index e7f54948173f..5b19826a7e16 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1847,7 +1847,7 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1847 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1847 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1848 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1848 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1849 1849
1850 ns = timespec_to_ns(ts); 1850 ns = timespec64_to_ns(ts);
1851 1851
1852 spin_lock_irqsave(&lio->ptp_lock, flags); 1852 spin_lock_irqsave(&lio->ptp_lock, flags);
1853 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); 1853 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index cb8182f4fdfa..c66abd476023 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1093,11 +1093,12 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1093 * places them in a descriptor array, scrq_arr 1093 * places them in a descriptor array, scrq_arr
1094 */ 1094 */
1095 1095
1096static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, 1096static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1097 union sub_crq *scrq_arr) 1097 union sub_crq *scrq_arr)
1098{ 1098{
1099 union sub_crq hdr_desc; 1099 union sub_crq hdr_desc;
1100 int tmp_len = len; 1100 int tmp_len = len;
1101 int num_descs = 0;
1101 u8 *data, *cur; 1102 u8 *data, *cur;
1102 int tmp; 1103 int tmp;
1103 1104
@@ -1126,7 +1127,10 @@ static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1126 tmp_len -= tmp; 1127 tmp_len -= tmp;
1127 *scrq_arr = hdr_desc; 1128 *scrq_arr = hdr_desc;
1128 scrq_arr++; 1129 scrq_arr++;
1130 num_descs++;
1129 } 1131 }
1132
1133 return num_descs;
1130} 1134}
1131 1135
1132/** 1136/**
@@ -1144,16 +1148,12 @@ static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1144 int *num_entries, u8 hdr_field) 1148 int *num_entries, u8 hdr_field)
1145{ 1149{
1146 int hdr_len[3] = {0, 0, 0}; 1150 int hdr_len[3] = {0, 0, 0};
1147 int tot_len, len; 1151 int tot_len;
1148 u8 *hdr_data = txbuff->hdr_data; 1152 u8 *hdr_data = txbuff->hdr_data;
1149 1153
1150 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len, 1154 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1151 txbuff->hdr_data); 1155 txbuff->hdr_data);
1152 len = tot_len; 1156 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1153 len -= 24;
1154 if (len > 0)
1155 num_entries += len % 29 ? len / 29 + 1 : len / 29;
1156 create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1157 txbuff->indir_arr + 1); 1157 txbuff->indir_arr + 1);
1158} 1158}
1159 1159
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 57505b1df98d..d591b3e6bd7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -298,7 +298,7 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
298} 298}
299 299
300/** 300/**
301 * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking 301 * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
302 * @hw: pointer to the HW structure 302 * @hw: pointer to the HW structure
303 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) 303 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
304 * @data: word read from the Shadow RAM 304 * @data: word read from the Shadow RAM
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 1519dfb851d0..2756131495f0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1038,6 +1038,32 @@ reset_latency:
1038} 1038}
1039 1039
1040/** 1040/**
1041 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1042 * @rx_ring: rx descriptor ring to store buffers on
1043 * @old_buff: donor buffer to have page reused
1044 *
1045 * Synchronizes page for reuse by the adapter
1046 **/
1047static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1048 struct i40e_rx_buffer *old_buff)
1049{
1050 struct i40e_rx_buffer *new_buff;
1051 u16 nta = rx_ring->next_to_alloc;
1052
1053 new_buff = &rx_ring->rx_bi[nta];
1054
1055 /* update, and store next to alloc */
1056 nta++;
1057 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1058
1059 /* transfer page from old buffer to new buffer */
1060 new_buff->dma = old_buff->dma;
1061 new_buff->page = old_buff->page;
1062 new_buff->page_offset = old_buff->page_offset;
1063 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1064}
1065
1066/**
1041 * i40e_rx_is_programming_status - check for programming status descriptor 1067 * i40e_rx_is_programming_status - check for programming status descriptor
1042 * @qw: qword representing status_error_len in CPU ordering 1068 * @qw: qword representing status_error_len in CPU ordering
1043 * 1069 *
@@ -1071,15 +1097,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
1071 union i40e_rx_desc *rx_desc, 1097 union i40e_rx_desc *rx_desc,
1072 u64 qw) 1098 u64 qw)
1073{ 1099{
1074 u32 ntc = rx_ring->next_to_clean + 1; 1100 struct i40e_rx_buffer *rx_buffer;
1101 u32 ntc = rx_ring->next_to_clean;
1075 u8 id; 1102 u8 id;
1076 1103
1077 /* fetch, update, and store next to clean */ 1104 /* fetch, update, and store next to clean */
1105 rx_buffer = &rx_ring->rx_bi[ntc++];
1078 ntc = (ntc < rx_ring->count) ? ntc : 0; 1106 ntc = (ntc < rx_ring->count) ? ntc : 0;
1079 rx_ring->next_to_clean = ntc; 1107 rx_ring->next_to_clean = ntc;
1080 1108
1081 prefetch(I40E_RX_DESC(rx_ring, ntc)); 1109 prefetch(I40E_RX_DESC(rx_ring, ntc));
1082 1110
1111 /* place unused page back on the ring */
1112 i40e_reuse_rx_page(rx_ring, rx_buffer);
1113 rx_ring->rx_stats.page_reuse_count++;
1114
1115 /* clear contents of buffer_info */
1116 rx_buffer->page = NULL;
1117
1083 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> 1118 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1084 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; 1119 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1085 1120
@@ -1639,32 +1674,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1639} 1674}
1640 1675
1641/** 1676/**
1642 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1643 * @rx_ring: rx descriptor ring to store buffers on
1644 * @old_buff: donor buffer to have page reused
1645 *
1646 * Synchronizes page for reuse by the adapter
1647 **/
1648static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1649 struct i40e_rx_buffer *old_buff)
1650{
1651 struct i40e_rx_buffer *new_buff;
1652 u16 nta = rx_ring->next_to_alloc;
1653
1654 new_buff = &rx_ring->rx_bi[nta];
1655
1656 /* update, and store next to alloc */
1657 nta++;
1658 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1659
1660 /* transfer page from old buffer to new buffer */
1661 new_buff->dma = old_buff->dma;
1662 new_buff->page = old_buff->page;
1663 new_buff->page_offset = old_buff->page_offset;
1664 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1665}
1666
1667/**
1668 * i40e_page_is_reusable - check if any reuse is possible 1677 * i40e_page_is_reusable - check if any reuse is possible
1669 * @page: page struct to check 1678 * @page: page struct to check
1670 * 1679 *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 9d5e7cf288be..f3315bc874ad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -96,6 +96,7 @@ struct mlxsw_core {
96 const struct mlxsw_bus *bus; 96 const struct mlxsw_bus *bus;
97 void *bus_priv; 97 void *bus_priv;
98 const struct mlxsw_bus_info *bus_info; 98 const struct mlxsw_bus_info *bus_info;
99 struct workqueue_struct *emad_wq;
99 struct list_head rx_listener_list; 100 struct list_head rx_listener_list;
100 struct list_head event_listener_list; 101 struct list_head event_listener_list;
101 struct { 102 struct {
@@ -465,7 +466,7 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
465{ 466{
466 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); 467 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
467 468
468 mlxsw_core_schedule_dw(&trans->timeout_dw, timeout); 469 queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
469} 470}
470 471
471static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, 472static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
@@ -587,12 +588,18 @@ static const struct mlxsw_listener mlxsw_emad_rx_listener =
587 588
588static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) 589static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
589{ 590{
591 struct workqueue_struct *emad_wq;
590 u64 tid; 592 u64 tid;
591 int err; 593 int err;
592 594
593 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) 595 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
594 return 0; 596 return 0;
595 597
598 emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
599 if (!emad_wq)
600 return -ENOMEM;
601 mlxsw_core->emad_wq = emad_wq;
602
596 /* Set the upper 32 bits of the transaction ID field to a random 603 /* Set the upper 32 bits of the transaction ID field to a random
597 * number. This allows us to discard EMADs addressed to other 604 * number. This allows us to discard EMADs addressed to other
598 * devices. 605 * devices.
@@ -619,6 +626,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
619err_emad_trap_set: 626err_emad_trap_set:
620 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, 627 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
621 mlxsw_core); 628 mlxsw_core);
629 destroy_workqueue(mlxsw_core->emad_wq);
622 return err; 630 return err;
623} 631}
624 632
@@ -631,6 +639,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
631 mlxsw_core->emad.use_emad = false; 639 mlxsw_core->emad.use_emad = false;
632 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, 640 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
633 mlxsw_core); 641 mlxsw_core);
642 destroy_workqueue(mlxsw_core->emad_wq);
634} 643}
635 644
636static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, 645static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
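The mlxsw/core.c hunks above give EMAD timeout handling its own workqueue: instead of scheduling the timeout work on the shared mlxsw workqueue, the driver now allocates a dedicated queue with WQ_MEM_RECLAIM (so the work is still guaranteed a worker under memory reclaim) and destroys it again on both the error and fini paths. A minimal sketch of the same allocate/arm/destroy lifecycle, with hypothetical foo_* names standing in for the driver's structures:

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct foo_emad {
            struct workqueue_struct *wq;
            struct delayed_work timeout_dw;
    };

    static void foo_emad_timeout(struct work_struct *work)
    {
            struct foo_emad *emad = container_of(to_delayed_work(work),
                                                 struct foo_emad, timeout_dw);

            /* handle the timed-out transaction tracked by emad here */
    }

    static int foo_emad_init(struct foo_emad *emad)
    {
            /* WQ_MEM_RECLAIM: a rescuer thread keeps this queue making
             * progress even when new workers cannot be spawned */
            emad->wq = alloc_workqueue("foo_emad", WQ_MEM_RECLAIM, 0);
            if (!emad->wq)
                    return -ENOMEM;
            INIT_DELAYED_WORK(&emad->timeout_dw, foo_emad_timeout);
            return 0;
    }

    static void foo_emad_arm(struct foo_emad *emad, unsigned int timeout_ms)
    {
            queue_delayed_work(emad->wq, &emad->timeout_dw,
                               msecs_to_jiffies(timeout_ms));
    }

    static void foo_emad_fini(struct foo_emad *emad)
    {
            cancel_delayed_work_sync(&emad->timeout_dw);
            destroy_workqueue(emad->wq);
    }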
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index cc27c5de5a1d..4afc8486eb9a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -6401,6 +6401,36 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
6401 mlxsw_reg_mgpc_opcode_set(payload, opcode); 6401 mlxsw_reg_mgpc_opcode_set(payload, opcode);
6402} 6402}
6403 6403
6404/* TIGCR - Tunneling IPinIP General Configuration Register
6405 * -------------------------------------------------------
6406 * The TIGCR register is used for setting up the IPinIP Tunnel configuration.
6407 */
6408#define MLXSW_REG_TIGCR_ID 0xA801
6409#define MLXSW_REG_TIGCR_LEN 0x10
6410
6411MLXSW_REG_DEFINE(tigcr, MLXSW_REG_TIGCR_ID, MLXSW_REG_TIGCR_LEN);
6412
6413/* reg_tigcr_ipip_ttlc
6414 * For IPinIP Tunnel encapsulation: whether to copy the ttl from the packet
6415 * header.
6416 * Access: RW
6417 */
6418MLXSW_ITEM32(reg, tigcr, ttlc, 0x04, 8, 1);
6419
6420/* reg_tigcr_ipip_ttl_uc
6421 * The TTL for IPinIP Tunnel encapsulation of unicast packets if
6422 * reg_tigcr_ipip_ttlc is unset.
6423 * Access: RW
6424 */
6425MLXSW_ITEM32(reg, tigcr, ttl_uc, 0x04, 0, 8);
6426
6427static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc)
6428{
6429 MLXSW_REG_ZERO(tigcr, payload);
6430 mlxsw_reg_tigcr_ttlc_set(payload, ttlc);
6431 mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc);
6432}
6433
6404/* SBPR - Shared Buffer Pools Register 6434/* SBPR - Shared Buffer Pools Register
6405 * ----------------------------------- 6435 * -----------------------------------
6406 * The SBPR configures and retrieves the shared buffer pools and configuration. 6436 * The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -6881,6 +6911,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
6881 MLXSW_REG(mcc), 6911 MLXSW_REG(mcc),
6882 MLXSW_REG(mcda), 6912 MLXSW_REG(mcda),
6883 MLXSW_REG(mgpc), 6913 MLXSW_REG(mgpc),
6914 MLXSW_REG(tigcr),
6884 MLXSW_REG(sbpr), 6915 MLXSW_REG(sbpr),
6885 MLXSW_REG(sbcm), 6916 MLXSW_REG(sbcm),
6886 MLXSW_REG(sbpm), 6917 MLXSW_REG(sbpm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index c16718d296d3..5189022a1c8c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -5896,11 +5896,20 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
5896 kfree(mlxsw_sp->router->rifs); 5896 kfree(mlxsw_sp->router->rifs);
5897} 5897}
5898 5898
5899static int
5900mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
5901{
5902 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
5903
5904 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
5905 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
5906}
5907
5899static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) 5908static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
5900{ 5909{
5901 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr; 5910 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
5902 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list); 5911 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
5903 return 0; 5912 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
5904} 5913}
5905 5914
5906static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp) 5915static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1c0187f0af51..e118b5f23996 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1180,10 +1180,14 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1180{ 1180{
1181 void *frag; 1181 void *frag;
1182 1182
1183 if (!dp->xdp_prog) 1183 if (!dp->xdp_prog) {
1184 frag = netdev_alloc_frag(dp->fl_bufsz); 1184 frag = netdev_alloc_frag(dp->fl_bufsz);
1185 else 1185 } else {
1186 frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD)); 1186 struct page *page;
1187
1188 page = alloc_page(GFP_KERNEL | __GFP_COLD);
1189 frag = page ? page_address(page) : NULL;
1190 }
1187 if (!frag) { 1191 if (!frag) {
1188 nn_dp_warn(dp, "Failed to alloc receive page frag\n"); 1192 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1189 return NULL; 1193 return NULL;
@@ -1203,10 +1207,14 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1203{ 1207{
1204 void *frag; 1208 void *frag;
1205 1209
1206 if (!dp->xdp_prog) 1210 if (!dp->xdp_prog) {
1207 frag = napi_alloc_frag(dp->fl_bufsz); 1211 frag = napi_alloc_frag(dp->fl_bufsz);
1208 else 1212 } else {
1209 frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD)); 1213 struct page *page;
1214
1215 page = alloc_page(GFP_ATOMIC | __GFP_COLD);
1216 frag = page ? page_address(page) : NULL;
1217 }
1210 if (!frag) { 1218 if (!frag) {
1211 nn_dp_warn(dp, "Failed to alloc receive page frag\n"); 1219 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1212 return NULL; 1220 return NULL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 07969f06df10..dc016dfec64d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -464,7 +464,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
464 464
465 do { 465 do {
466 start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); 466 start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
467 *data++ = nn->r_vecs[i].rx_pkts; 467 data[0] = nn->r_vecs[i].rx_pkts;
468 tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; 468 tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
469 tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; 469 tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
470 tmp[2] = nn->r_vecs[i].hw_csum_rx_error; 470 tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
@@ -472,14 +472,16 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
472 472
473 do { 473 do {
474 start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); 474 start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
475 *data++ = nn->r_vecs[i].tx_pkts; 475 data[1] = nn->r_vecs[i].tx_pkts;
476 *data++ = nn->r_vecs[i].tx_busy; 476 data[2] = nn->r_vecs[i].tx_busy;
477 tmp[3] = nn->r_vecs[i].hw_csum_tx; 477 tmp[3] = nn->r_vecs[i].hw_csum_tx;
478 tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; 478 tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
479 tmp[5] = nn->r_vecs[i].tx_gather; 479 tmp[5] = nn->r_vecs[i].tx_gather;
480 tmp[6] = nn->r_vecs[i].tx_lso; 480 tmp[6] = nn->r_vecs[i].tx_lso;
481 } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); 481 } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
482 482
483 data += 3;
484
483 for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) 485 for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
484 gathered_stats[j] += tmp[j]; 486 gathered_stats[j] += tmp[j];
485 } 487 }
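The nfp_net_ethtool.c hunks fix a subtle property of u64_stats read sections: the body of the do/while may be re-executed when a writer races the reader, so it must be idempotent. The old code did *data++ inside the loop, advancing the output cursor once per retry; the fix writes to fixed slots and bumps the pointer exactly once after the loop. The idiom in miniature (foo_ring_stats is illustrative):

    #include <linux/types.h>
    #include <linux/u64_stats_sync.h>

    struct foo_ring_stats {
            struct u64_stats_sync syncp;
            u64 packets;
            u64 bytes;
    };

    static u64 *foo_fill_stats(const struct foo_ring_stats *s, u64 *data)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin(&s->syncp);
                    /* fixed slots: a retry simply overwrites them */
                    data[0] = s->packets;
                    data[1] = s->bytes;
            } while (u64_stats_fetch_retry(&s->syncp, start));

            /* advance the cursor exactly once, outside the retry loop */
            return data + 2;
    }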
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e03fcf914690..a3c949ea7d1a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8491,8 +8491,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8491 rtl8168_driver_start(tp); 8491 rtl8168_driver_start(tp);
8492 } 8492 }
8493 8493
8494 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
8495
8496 if (pci_dev_run_wake(pdev)) 8494 if (pci_dev_run_wake(pdev))
8497 pm_runtime_put_noidle(&pdev->dev); 8495 pm_runtime_put_noidle(&pdev->dev);
8498 8496
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index e0ef02f9503b..4b286e27c4ca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -275,7 +275,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
275 goto exit; 275 goto exit;
276 i++; 276 i++;
277 277
278 } while ((ret == 1) || (i < 10)); 278 } while ((ret == 1) && (i < 10));
279 279
280 if (i == 10) 280 if (i == 10)
281 ret = -EBUSY; 281 ret = -EBUSY;
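The dwmac4_descs.c change is a one-character logic fix: with ||, the condition (ret == 1) || (i < 10) stayed true for as long as the hardware kept answering "timestamp not ready", so the supposedly bounded retry could spin forever; && makes the loop stop on either a definitive status or an exhausted budget. The general shape of a bounded retry, with try_once() as a hypothetical stand-in for the status read:

    int ret, tries = 0;

    do {
            ret = try_once();               /* hypothetical: returns 1 for "not ready yet" */
            tries++;
    } while (ret == 1 && tries < 10);       /* both must hold to keep looping */

    if (ret == 1)
            ret = -EBUSY;                   /* budget exhausted, still not ready */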
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 67af0bdd7f10..7516ca210855 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -34,7 +34,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
34 34
35 err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, 35 err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
36 !(value & DMA_BUS_MODE_SFT_RESET), 36 !(value & DMA_BUS_MODE_SFT_RESET),
37 100000, 10000); 37 10000, 100000);
38 if (err) 38 if (err)
39 return -EBUSY; 39 return -EBUSY;
40 40
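The dwmac_lib.c fix swaps the last two arguments of readl_poll_timeout(): the parameter order in include/linux/iopoll.h is (addr, val, cond, sleep_us, timeout_us), so the old call requested a 100 ms sleep between polls with only a 10 ms overall budget, which is backwards. Annotated, the corrected call reads:

    #include <linux/iopoll.h>

    u32 value;
    int err;

    /* poll roughly every 10 ms (sleep_us) and give up after 100 ms (timeout_us) */
    err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
                             !(value & DMA_BUS_MODE_SFT_RESET),
                             10000, 100000);
    if (err)
            return -EBUSY;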
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 1763e48c84e2..16bd50929084 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -473,19 +473,18 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
473 struct dma_desc *np, struct sk_buff *skb) 473 struct dma_desc *np, struct sk_buff *skb)
474{ 474{
475 struct skb_shared_hwtstamps *shhwtstamp = NULL; 475 struct skb_shared_hwtstamps *shhwtstamp = NULL;
476 struct dma_desc *desc = p;
476 u64 ns; 477 u64 ns;
477 478
478 if (!priv->hwts_rx_en) 479 if (!priv->hwts_rx_en)
479 return; 480 return;
481 /* For GMAC4, the valid timestamp is from CTX next desc. */
482 if (priv->plat->has_gmac4)
483 desc = np;
480 484
481 /* Check if timestamp is available */ 485 /* Check if timestamp is available */
482 if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { 486 if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
483 /* For GMAC4, the valid timestamp is from CTX next desc. */ 487 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
484 if (priv->plat->has_gmac4)
485 ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
486 else
487 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
488
489 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); 488 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
490 shhwtstamp = skb_hwtstamps(skb); 489 shhwtstamp = skb_hwtstamps(skb);
491 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 490 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
@@ -1800,12 +1799,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1800{ 1799{
1801 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1800 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1802 unsigned int bytes_compl = 0, pkts_compl = 0; 1801 unsigned int bytes_compl = 0, pkts_compl = 0;
1803 unsigned int entry = tx_q->dirty_tx; 1802 unsigned int entry;
1804 1803
1805 netif_tx_lock(priv->dev); 1804 netif_tx_lock(priv->dev);
1806 1805
1807 priv->xstats.tx_clean++; 1806 priv->xstats.tx_clean++;
1808 1807
1808 entry = tx_q->dirty_tx;
1809 while (entry != tx_q->cur_tx) { 1809 while (entry != tx_q->cur_tx) {
1810 struct sk_buff *skb = tx_q->tx_skbuff[entry]; 1810 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1811 struct dma_desc *p; 1811 struct dma_desc *p;
@@ -3333,6 +3333,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3333 * them in stmmac_rx_refill() function so that 3333 * them in stmmac_rx_refill() function so that
3334 * device can reuse it. 3334 * device can reuse it.
3335 */ 3335 */
3336 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3336 rx_q->rx_skbuff[entry] = NULL; 3337 rx_q->rx_skbuff[entry] = NULL;
3337 dma_unmap_single(priv->device, 3338 dma_unmap_single(priv->device,
3338 rx_q->rx_skbuff_dma[entry], 3339 rx_q->rx_skbuff_dma[entry],
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index f6404074b7b0..ed51018a813e 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -113,13 +113,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
113 113
114static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni) 114static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
115{ 115{
116#ifdef __BIG_ENDIAN
117 return (vni[0] == tun_id[2]) &&
118 (vni[1] == tun_id[1]) &&
119 (vni[2] == tun_id[0]);
120#else
121 return !memcmp(vni, &tun_id[5], 3); 116 return !memcmp(vni, &tun_id[5], 3);
122#endif
123} 117}
124 118
125static sa_family_t geneve_get_sk_family(struct geneve_sock *gs) 119static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 98e4deaa3a6a..5ab1b8849c30 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -742,6 +742,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
742 sg_init_table(sg, ret); 742 sg_init_table(sg, ret);
743 ret = skb_to_sgvec(skb, sg, 0, skb->len); 743 ret = skb_to_sgvec(skb, sg, 0, skb->len);
744 if (unlikely(ret < 0)) { 744 if (unlikely(ret < 0)) {
745 aead_request_free(req);
745 macsec_txsa_put(tx_sa); 746 macsec_txsa_put(tx_sa);
746 kfree_skb(skb); 747 kfree_skb(skb);
747 return ERR_PTR(ret); 748 return ERR_PTR(ret);
@@ -954,6 +955,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
954 sg_init_table(sg, ret); 955 sg_init_table(sg, ret);
955 ret = skb_to_sgvec(skb, sg, 0, skb->len); 956 ret = skb_to_sgvec(skb, sg, 0, skb->len);
956 if (unlikely(ret < 0)) { 957 if (unlikely(ret < 0)) {
958 aead_request_free(req);
957 kfree_skb(skb); 959 kfree_skb(skb);
958 return ERR_PTR(ret); 960 return ERR_PTR(ret);
959 } 961 }
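Both macsec.c hunks plug the same leak: when skb_to_sgvec() fails after the AEAD request has already been allocated, the error path freed the skb (and, on the TX side, dropped the SA reference) but never called aead_request_free(), so the request leaked. The usual way to keep such paths honest is an unwind ladder that releases resources in reverse order of acquisition; a sketch with hypothetical foo_* setup steps:

    #include <crypto/aead.h>
    #include <linux/err.h>
    #include <linux/skbuff.h>

    req = aead_request_alloc(tfm, GFP_ATOMIC);
    if (!req)
            return ERR_PTR(-ENOMEM);

    ret = foo_map_buffers(skb, req);        /* hypothetical setup step */
    if (ret < 0)
            goto err_free_req;

    ret = foo_submit(req);                  /* hypothetical submit step */
    if (ret < 0)
            goto err_free_req;

    return skb;

    err_free_req:
            aead_request_free(req);         /* undo in reverse order of acquisition */
            kfree_skb(skb);
            return ERR_PTR(ret);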
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 5ce580f413b9..e21bf90b819f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2027,6 +2027,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2027 2027
2028 if (!dev) 2028 if (!dev)
2029 return -ENOMEM; 2029 return -ENOMEM;
2030 err = dev_get_valid_name(net, dev, name);
2031 if (err)
2032 goto err_free_dev;
2030 2033
2031 dev_net_set(dev, net); 2034 dev_net_set(dev, net);
2032 dev->rtnl_link_ops = &tun_link_ops; 2035 dev->rtnl_link_ops = &tun_link_ops;
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index c9c711dcd0e6..a89b5685e68b 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -652,7 +652,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
652 struct device *dev = i2400m_dev(i2400m); 652 struct device *dev = i2400m_dev(i2400m);
653 struct { 653 struct {
654 struct i2400m_bootrom_header cmd; 654 struct i2400m_bootrom_header cmd;
655 u8 cmd_payload[chunk_len]; 655 u8 cmd_payload[];
656 } __packed *buf; 656 } __packed *buf;
657 struct i2400m_bootrom_header ack; 657 struct i2400m_bootrom_header ack;
658 658
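The i2400m change removes a variable-length array from an on-stack struct type — u8 cmd_payload[chunk_len], a GCC extension — and declares the payload as a C99 flexible array member instead, so the payload size is carried by the allocation rather than by the type. The pattern in isolation (struct names are illustrative, not the driver's):

    #include <linux/types.h>
    #include <linux/slab.h>

    struct foo_header {
            __le32 opcode;
            __le32 data_size;
    } __packed;

    struct foo_msg {
            struct foo_header hdr;
            u8 payload[];                   /* flexible array member, must be last */
    } __packed;

    static struct foo_msg *foo_msg_alloc(size_t payload_len, gfp_t gfp)
    {
            struct foo_msg *msg;

            /* the header is sized by the type, the payload by the caller */
            msg = kzalloc(sizeof(*msg) + payload_len, gfp);
            return msg;
    }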
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 4eb1e1ce9ace..ef72baf6dd96 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -429,7 +429,8 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
429 if (code != BRCMF_E_IF && !fweh->evt_handler[code]) 429 if (code != BRCMF_E_IF && !fweh->evt_handler[code])
430 return; 430 return;
431 431
432 if (datalen > BRCMF_DCMD_MAXLEN) 432 if (datalen > BRCMF_DCMD_MAXLEN ||
433 datalen + sizeof(*event_packet) > packet_len)
433 return; 434 return;
434 435
435 if (in_interrupt()) 436 if (in_interrupt())
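The brcmfmac check hardens event parsing: datalen comes from the event packet itself, so in addition to the absolute BRCMF_DCMD_MAXLEN cap it must be validated against the number of bytes actually received, otherwise the handler would read past the end of the buffer. The shape of the check, with hypothetical names:

    /* the claimed payload length must fit inside what was actually received */
    if (datalen > FOO_DCMD_MAXLEN ||
        datalen + sizeof(*event_packet) > packet_len)
            return;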
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index b3aab2fe96eb..ef685465f80a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -14764,8 +14764,8 @@ static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
14764} 14764}
14765 14765
14766static void 14766static void
14767wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys, 14767wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, const u8 *events,
14768 u8 len) 14768 const u8 *dlys, u8 len)
14769{ 14769{
14770 u32 t1_offset, t2_offset; 14770 u32 t1_offset, t2_offset;
14771 u8 ctr; 14771 u8 ctr;
@@ -15240,16 +15240,16 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
15240static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi) 15240static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
15241{ 15241{
15242 u16 currband; 15242 u16 currband;
15243 s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 }; 15243 static const s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
15244 s8 *lna1_gain_db = NULL; 15244 const s8 *lna1_gain_db = NULL;
15245 s8 *lna1_gain_db_2 = NULL; 15245 const s8 *lna1_gain_db_2 = NULL;
15246 s8 *lna2_gain_db = NULL; 15246 const s8 *lna2_gain_db = NULL;
15247 s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 }; 15247 static const s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
15248 s8 *tia_gain_db; 15248 const s8 *tia_gain_db;
15249 s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 }; 15249 static const s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
15250 s8 *tia_gainbits; 15250 const s8 *tia_gainbits;
15251 u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f }; 15251 static const u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
15252 u16 *rfseq_init_gain; 15252 const u16 *rfseq_init_gain;
15253 u16 init_gaincode; 15253 u16 init_gaincode;
15254 u16 clip1hi_gaincode; 15254 u16 clip1hi_gaincode;
15255 u16 clip1md_gaincode = 0; 15255 u16 clip1md_gaincode = 0;
@@ -15310,10 +15310,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
15310 15310
15311 if ((freq <= 5080) || (freq == 5825)) { 15311 if ((freq <= 5080) || (freq == 5825)) {
15312 15312
15313 s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 }; 15313 static const s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
15314 s8 lna1A_gain_db_2_rev7[] = { 15314 static const s8 lna1A_gain_db_2_rev7[] = { 11, 17, 22, 25};
15315 11, 17, 22, 25}; 15315 static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
15316 s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
15317 15316
15318 crsminu_th = 0x3e; 15317 crsminu_th = 0x3e;
15319 lna1_gain_db = lna1A_gain_db_rev7; 15318 lna1_gain_db = lna1A_gain_db_rev7;
@@ -15321,10 +15320,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
15321 lna2_gain_db = lna2A_gain_db_rev7; 15320 lna2_gain_db = lna2A_gain_db_rev7;
15322 } else if ((freq >= 5500) && (freq <= 5700)) { 15321 } else if ((freq >= 5500) && (freq <= 5700)) {
15323 15322
15324 s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 }; 15323 static const s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
15325 s8 lna1A_gain_db_2_rev7[] = { 15324 static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
15326 12, 18, 22, 26}; 15325 static const s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
15327 s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
15328 15326
15329 crsminu_th = 0x45; 15327 crsminu_th = 0x45;
15330 clip1md_gaincode_B = 0x14; 15328 clip1md_gaincode_B = 0x14;
@@ -15335,10 +15333,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
15335 lna2_gain_db = lna2A_gain_db_rev7; 15333 lna2_gain_db = lna2A_gain_db_rev7;
15336 } else { 15334 } else {
15337 15335
15338 s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 }; 15336 static const s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
15339 s8 lna1A_gain_db_2_rev7[] = { 15337 static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
15340 12, 18, 22, 26}; 15338 static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
15341 s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
15342 15339
15343 crsminu_th = 0x41; 15340 crsminu_th = 0x41;
15344 lna1_gain_db = lna1A_gain_db_rev7; 15341 lna1_gain_db = lna1A_gain_db_rev7;
@@ -15450,65 +15447,65 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
15450 NPHY_RFSEQ_CMD_CLR_HIQ_DIS, 15447 NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
15451 NPHY_RFSEQ_CMD_SET_HPF_BW 15448 NPHY_RFSEQ_CMD_SET_HPF_BW
15452 }; 15449 };
15453 u8 rfseq_updategainu_dlys[] = { 10, 30, 1 }; 15450 static const u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
15454 s8 lna1G_gain_db[] = { 7, 11, 16, 23 }; 15451 static const s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
15455 s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 }; 15452 static const s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
15456 s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 }; 15453 static const s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
15457 s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 }; 15454 static const s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
15458 s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 }; 15455 static const s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
15459 s8 lna1A_gain_db[] = { 7, 11, 17, 23 }; 15456 static const s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
15460 s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 }; 15457 static const s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
15461 s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 }; 15458 static const s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
15462 s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 }; 15459 static const s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
15463 s8 *lna1_gain_db = NULL; 15460 const s8 *lna1_gain_db = NULL;
15464 s8 lna2G_gain_db[] = { -5, 6, 10, 14 }; 15461 static const s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
15465 s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 }; 15462 static const s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
15466 s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 }; 15463 static const s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
15467 s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 }; 15464 static const s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
15468 s8 lna2A_gain_db[] = { -6, 2, 6, 10 }; 15465 static const s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
15469 s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 }; 15466 static const s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
15470 s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 }; 15467 static const s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
15471 s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 }; 15468 static const s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
15472 s8 *lna2_gain_db = NULL; 15469 const s8 *lna2_gain_db = NULL;
15473 s8 tiaG_gain_db[] = { 15470 static const s8 tiaG_gain_db[] = {
15474 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A }; 15471 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
15475 s8 tiaA_gain_db[] = { 15472 static const s8 tiaA_gain_db[] = {
15476 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 }; 15473 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
15477 s8 tiaA_gain_db_rev4[] = { 15474 static const s8 tiaA_gain_db_rev4[] = {
15478 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d }; 15475 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
15479 s8 tiaA_gain_db_rev5[] = { 15476 static const s8 tiaA_gain_db_rev5[] = {
15480 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d }; 15477 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
15481 s8 tiaA_gain_db_rev6[] = { 15478 static const s8 tiaA_gain_db_rev6[] = {
15482 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d }; 15479 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
15483 s8 *tia_gain_db; 15480 const s8 *tia_gain_db;
15484 s8 tiaG_gainbits[] = { 15481 static const s8 tiaG_gainbits[] = {
15485 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; 15482 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
15486 s8 tiaA_gainbits[] = { 15483 static const s8 tiaA_gainbits[] = {
15487 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 }; 15484 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
15488 s8 tiaA_gainbits_rev4[] = { 15485 static const s8 tiaA_gainbits_rev4[] = {
15489 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 }; 15486 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
15490 s8 tiaA_gainbits_rev5[] = { 15487 static const s8 tiaA_gainbits_rev5[] = {
15491 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 }; 15488 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
15492 s8 tiaA_gainbits_rev6[] = { 15489 static const s8 tiaA_gainbits_rev6[] = {
15493 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 }; 15490 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
15494 s8 *tia_gainbits; 15491 const s8 *tia_gainbits;
15495 s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 }; 15492 static const s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
15496 s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 }; 15493 static const s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
15497 u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f }; 15494 static const u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
15498 u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f }; 15495 static const u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
15499 u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f }; 15496 static const u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
15500 u16 rfseqG_init_gain_rev5_elna[] = { 15497 static const u16 rfseqG_init_gain_rev5_elna[] = {
15501 0x013f, 0x013f, 0x013f, 0x013f }; 15498 0x013f, 0x013f, 0x013f, 0x013f };
15502 u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f }; 15499 static const u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
15503 u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f }; 15500 static const u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
15504 u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f }; 15501 static const u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
15505 u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f }; 15502 static const u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
15506 u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f }; 15503 static const u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
15507 u16 rfseqA_init_gain_rev4_elna[] = { 15504 static const u16 rfseqA_init_gain_rev4_elna[] = {
15508 0x314f, 0x314f, 0x314f, 0x314f }; 15505 0x314f, 0x314f, 0x314f, 0x314f };
15509 u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f }; 15506 static const u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
15510 u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f }; 15507 static const u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
15511 u16 *rfseq_init_gain; 15508 const u16 *rfseq_init_gain;
15512 u16 initG_gaincode = 0x627e; 15509 u16 initG_gaincode = 0x627e;
15513 u16 initG_gaincode_rev4 = 0x527e; 15510 u16 initG_gaincode_rev4 = 0x527e;
15514 u16 initG_gaincode_rev5 = 0x427e; 15511 u16 initG_gaincode_rev5 = 0x427e;
@@ -15538,10 +15535,10 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
15538 u16 clip1mdA_gaincode_rev6 = 0x2084; 15535 u16 clip1mdA_gaincode_rev6 = 0x2084;
15539 u16 clip1md_gaincode = 0; 15536 u16 clip1md_gaincode = 0;
15540 u16 clip1loG_gaincode = 0x0074; 15537 u16 clip1loG_gaincode = 0x0074;
15541 u16 clip1loG_gaincode_rev5[] = { 15538 static const u16 clip1loG_gaincode_rev5[] = {
15542 0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c 15539 0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
15543 }; 15540 };
15544 u16 clip1loG_gaincode_rev6[] = { 15541 static const u16 clip1loG_gaincode_rev6[] = {
15545 0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e 15542 0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
15546 }; 15543 };
15547 u16 clip1loG_gaincode_rev6_224B0 = 0x1074; 15544 u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
@@ -16066,7 +16063,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
16066 16063
16067static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) 16064static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16068{ 16065{
16069 u8 rfseq_rx2tx_events[] = { 16066 static const u8 rfseq_rx2tx_events[] = {
16070 NPHY_RFSEQ_CMD_NOP, 16067 NPHY_RFSEQ_CMD_NOP,
16071 NPHY_RFSEQ_CMD_RXG_FBW, 16068 NPHY_RFSEQ_CMD_RXG_FBW,
16072 NPHY_RFSEQ_CMD_TR_SWITCH, 16069 NPHY_RFSEQ_CMD_TR_SWITCH,
@@ -16076,7 +16073,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16076 NPHY_RFSEQ_CMD_EXT_PA 16073 NPHY_RFSEQ_CMD_EXT_PA
16077 }; 16074 };
16078 u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 }; 16075 u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
16079 u8 rfseq_tx2rx_events[] = { 16076 static const u8 rfseq_tx2rx_events[] = {
16080 NPHY_RFSEQ_CMD_NOP, 16077 NPHY_RFSEQ_CMD_NOP,
16081 NPHY_RFSEQ_CMD_EXT_PA, 16078 NPHY_RFSEQ_CMD_EXT_PA,
16082 NPHY_RFSEQ_CMD_TX_GAIN, 16079 NPHY_RFSEQ_CMD_TX_GAIN,
@@ -16085,8 +16082,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16085 NPHY_RFSEQ_CMD_RXG_FBW, 16082 NPHY_RFSEQ_CMD_RXG_FBW,
16086 NPHY_RFSEQ_CMD_CLR_HIQ_DIS 16083 NPHY_RFSEQ_CMD_CLR_HIQ_DIS
16087 }; 16084 };
16088 u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 }; 16085 static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
16089 u8 rfseq_tx2rx_events_rev3[] = { 16086 static const u8 rfseq_tx2rx_events_rev3[] = {
16090 NPHY_REV3_RFSEQ_CMD_EXT_PA, 16087 NPHY_REV3_RFSEQ_CMD_EXT_PA,
16091 NPHY_REV3_RFSEQ_CMD_INT_PA_PU, 16088 NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
16092 NPHY_REV3_RFSEQ_CMD_TX_GAIN, 16089 NPHY_REV3_RFSEQ_CMD_TX_GAIN,
@@ -16096,7 +16093,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16096 NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS, 16093 NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
16097 NPHY_REV3_RFSEQ_CMD_END 16094 NPHY_REV3_RFSEQ_CMD_END
16098 }; 16095 };
16099 u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 }; 16096 static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
16100 u8 rfseq_rx2tx_events_rev3[] = { 16097 u8 rfseq_rx2tx_events_rev3[] = {
16101 NPHY_REV3_RFSEQ_CMD_NOP, 16098 NPHY_REV3_RFSEQ_CMD_NOP,
16102 NPHY_REV3_RFSEQ_CMD_RXG_FBW, 16099 NPHY_REV3_RFSEQ_CMD_RXG_FBW,
@@ -16110,7 +16107,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16110 }; 16107 };
16111 u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 }; 16108 u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
16112 16109
16113 u8 rfseq_rx2tx_events_rev3_ipa[] = { 16110 static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
16114 NPHY_REV3_RFSEQ_CMD_NOP, 16111 NPHY_REV3_RFSEQ_CMD_NOP,
16115 NPHY_REV3_RFSEQ_CMD_RXG_FBW, 16112 NPHY_REV3_RFSEQ_CMD_RXG_FBW,
16116 NPHY_REV3_RFSEQ_CMD_TR_SWITCH, 16113 NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
@@ -16121,15 +16118,15 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16121 NPHY_REV3_RFSEQ_CMD_INT_PA_PU, 16118 NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
16122 NPHY_REV3_RFSEQ_CMD_END 16119 NPHY_REV3_RFSEQ_CMD_END
16123 }; 16120 };
16124 u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 }; 16121 static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
16125 u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f }; 16122 static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
16126 16123
16127 s16 alpha0, alpha1, alpha2; 16124 s16 alpha0, alpha1, alpha2;
16128 s16 beta0, beta1, beta2; 16125 s16 beta0, beta1, beta2;
16129 u32 leg_data_weights, ht_data_weights, nss1_data_weights, 16126 u32 leg_data_weights, ht_data_weights, nss1_data_weights,
16130 stbc_data_weights; 16127 stbc_data_weights;
16131 u8 chan_freq_range = 0; 16128 u8 chan_freq_range = 0;
16132 u16 dac_control = 0x0002; 16129 static const u16 dac_control = 0x0002;
16133 u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 }; 16130 u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 };
16134 u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 }; 16131 u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 };
16135 u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 }; 16132 u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
@@ -16139,8 +16136,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16139 u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 }; 16136 u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
16140 u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 }; 16137 u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
16141 u16 *aux_adc_gain; 16138 u16 *aux_adc_gain;
16142 u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 }; 16139 static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
16143 u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 }; 16140 static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
16144 s32 min_nvar_val = 0x18d; 16141 s32 min_nvar_val = 0x18d;
16145 s32 min_nvar_offset_6mbps = 20; 16142 s32 min_nvar_offset_6mbps = 20;
16146 u8 pdetrange; 16143 u8 pdetrange;
@@ -16151,9 +16148,9 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16151 u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77; 16148 u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77;
16152 u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77; 16149 u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77;
16153 u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77; 16150 u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77;
16154 u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 }; 16151 static const u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
16155 u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 }; 16152 static const u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
16156 u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 }; 16153 static const u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
16157 u16 ipalvlshift_3p3_war_en = 0; 16154 u16 ipalvlshift_3p3_war_en = 0;
16158 u16 rccal_bcap_val, rccal_scap_val; 16155 u16 rccal_bcap_val, rccal_scap_val;
16159 u16 rccal_tx20_11b_bcap = 0; 16156 u16 rccal_tx20_11b_bcap = 0;
@@ -24291,13 +24288,13 @@ static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
24291 u16 bbmult; 24288 u16 bbmult;
24292 u16 tblentry; 24289 u16 tblentry;
24293 24290
24294 struct nphy_txiqcal_ladder ladder_lo[] = { 24291 static const struct nphy_txiqcal_ladder ladder_lo[] = {
24295 {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0}, 24292 {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
24296 {25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5}, 24293 {25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
24297 {25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7} 24294 {25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
24298 }; 24295 };
24299 24296
24300 struct nphy_txiqcal_ladder ladder_iq[] = { 24297 static const struct nphy_txiqcal_ladder ladder_iq[] = {
24301 {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0}, 24298 {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
24302 {25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1}, 24299 {25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
24303 {100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7} 24300 {100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
@@ -25773,67 +25770,67 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
25773 u16 cal_gain[2]; 25770 u16 cal_gain[2];
25774 struct nphy_iqcal_params cal_params[2]; 25771 struct nphy_iqcal_params cal_params[2];
25775 u32 tbl_len; 25772 u32 tbl_len;
25776 void *tbl_ptr; 25773 const void *tbl_ptr;
25777 bool ladder_updated[2]; 25774 bool ladder_updated[2];
25778 u8 mphase_cal_lastphase = 0; 25775 u8 mphase_cal_lastphase = 0;
25779 int bcmerror = 0; 25776 int bcmerror = 0;
25780 bool phyhang_avoid_state = false; 25777 bool phyhang_avoid_state = false;
25781 25778
25782 u16 tbl_tx_iqlo_cal_loft_ladder_20[] = { 25779 static const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
25783 0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901, 25780 0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901,
25784 0x1902, 25781 0x1902,
25785 0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607, 25782 0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607,
25786 0x6407 25783 0x6407
25787 }; 25784 };
25788 25785
25789 u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = { 25786 static const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
25790 0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400, 25787 0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400,
25791 0x3200, 25788 0x3200,
25792 0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406, 25789 0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406,
25793 0x6407 25790 0x6407
25794 }; 25791 };
25795 25792
25796 u16 tbl_tx_iqlo_cal_loft_ladder_40[] = { 25793 static const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
25797 0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201, 25794 0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201,
25798 0x1202, 25795 0x1202,
25799 0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207, 25796 0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207,
25800 0x4707 25797 0x4707
25801 }; 25798 };
25802 25799
25803 u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = { 25800 static const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
25804 0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900, 25801 0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900,
25805 0x2300, 25802 0x2300,
25806 0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706, 25803 0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706,
25807 0x4707 25804 0x4707
25808 }; 25805 };
25809 25806
25810 u16 tbl_tx_iqlo_cal_startcoefs[] = { 25807 static const u16 tbl_tx_iqlo_cal_startcoefs[] = {
25811 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 25808 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
25812 0x0000 25809 0x0000
25813 }; 25810 };
25814 25811
25815 u16 tbl_tx_iqlo_cal_cmds_fullcal[] = { 25812 static const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
25816 0x8123, 0x8264, 0x8086, 0x8245, 0x8056, 25813 0x8123, 0x8264, 0x8086, 0x8245, 0x8056,
25817 0x9123, 0x9264, 0x9086, 0x9245, 0x9056 25814 0x9123, 0x9264, 0x9086, 0x9245, 0x9056
25818 }; 25815 };
25819 25816
25820 u16 tbl_tx_iqlo_cal_cmds_recal[] = { 25817 static const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
25821 0x8101, 0x8253, 0x8053, 0x8234, 0x8034, 25818 0x8101, 0x8253, 0x8053, 0x8234, 0x8034,
25822 0x9101, 0x9253, 0x9053, 0x9234, 0x9034 25819 0x9101, 0x9253, 0x9053, 0x9234, 0x9034
25823 }; 25820 };
25824 25821
25825 u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = { 25822 static const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
25826 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 25823 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
25827 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 25824 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
25828 0x0000 25825 0x0000
25829 }; 25826 };
25830 25827
25831 u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = { 25828 static const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
25832 0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234, 25829 0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234,
25833 0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234 25830 0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234
25834 }; 25831 };
25835 25832
25836 u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = { 25833 static const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
25837 0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223, 25834 0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223,
25838 0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223 25835 0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223
25839 }; 25836 };
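Every phy_n.c hunk applies one transformation: lookup tables that never change become static const, so they live in .rodata instead of being rebuilt on the stack on every call (these calibration functions declare dozens of such arrays, which adds up to real stack and code-size cost), and the pointers that select among them become pointers-to-const. Before/after in miniature:

    #include <linux/types.h>

    /* before: the initializer is copied onto the stack on every call */
    static u16 foo_first_gain_stack(void)
    {
            u16 init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
            u16 *gain = init_gain;

            return gain[0];
    }

    /* after: a single read-only copy in .rodata, nothing rebuilt at run time */
    static u16 foo_first_gain_rodata(void)
    {
            static const u16 init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
            const u16 *gain = init_gain;

            return gain[0];
    }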
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index 45e2efc70d19..ce741beec1fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -309,6 +309,7 @@ const struct iwl_cfg iwl3168_2ac_cfg = {
309 .nvm_calib_ver = IWL3168_TX_POWER_VERSION, 309 .nvm_calib_ver = IWL3168_TX_POWER_VERSION,
310 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, 310 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
311 .dccm_len = IWL7265_DCCM_LEN, 311 .dccm_len = IWL7265_DCCM_LEN,
312 .nvm_type = IWL_NVM_SDP,
312}; 313};
313 314
314const struct iwl_cfg iwl7265_2ac_cfg = { 315const struct iwl_cfg iwl7265_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 2e6c52664cee..c2a5936ccede 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -164,7 +164,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
164 .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \ 164 .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \
165 .thermal_params = &iwl8000_tt_params, \ 165 .thermal_params = &iwl8000_tt_params, \
166 .apmg_not_supported = true, \ 166 .apmg_not_supported = true, \
167 .ext_nvm = true, \ 167 .nvm_type = IWL_NVM_EXT, \
168 .dbgc_supported = true 168 .dbgc_supported = true
169 169
170#define IWL_DEVICE_8000 \ 170#define IWL_DEVICE_8000 \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index 2babe0a1f18b..e8b5ff42f5a8 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -148,7 +148,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
148 .vht_mu_mimo_supported = true, \ 148 .vht_mu_mimo_supported = true, \
149 .mac_addr_from_csr = true, \ 149 .mac_addr_from_csr = true, \
150 .rf_id = true, \ 150 .rf_id = true, \
151 .ext_nvm = true, \ 151 .nvm_type = IWL_NVM_EXT, \
152 .dbgc_supported = true 152 .dbgc_supported = true
153 153
154const struct iwl_cfg iwl9160_2ac_cfg = { 154const struct iwl_cfg iwl9160_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
index 76ba1f8bc72f..a440140ed8dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
@@ -133,7 +133,7 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
133 .use_tfh = true, \ 133 .use_tfh = true, \
134 .rf_id = true, \ 134 .rf_id = true, \
135 .gen2 = true, \ 135 .gen2 = true, \
136 .ext_nvm = true, \ 136 .nvm_type = IWL_NVM_EXT, \
137 .dbgc_supported = true 137 .dbgc_supported = true
138 138
139const struct iwl_cfg iwla000_2ac_cfg_hr = { 139const struct iwl_cfg iwla000_2ac_cfg_hr = {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 00bc7a25dece..3fd07bc80f54 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -108,6 +108,7 @@ enum iwl_nvm_access_target {
108 * @NVM_SECTION_TYPE_REGULATORY: regulatory section 108 * @NVM_SECTION_TYPE_REGULATORY: regulatory section
109 * @NVM_SECTION_TYPE_CALIBRATION: calibration section 109 * @NVM_SECTION_TYPE_CALIBRATION: calibration section
110 * @NVM_SECTION_TYPE_PRODUCTION: production section 110 * @NVM_SECTION_TYPE_PRODUCTION: production section
111 * @NVM_SECTION_TYPE_REGULATORY_SDP: regulatory section used by 3168 series
111 * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section 112 * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section
112 * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section 113 * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section
113 * @NVM_MAX_NUM_SECTIONS: number of sections 114 * @NVM_MAX_NUM_SECTIONS: number of sections
@@ -117,6 +118,7 @@ enum iwl_nvm_section_type {
117 NVM_SECTION_TYPE_REGULATORY = 3, 118 NVM_SECTION_TYPE_REGULATORY = 3,
118 NVM_SECTION_TYPE_CALIBRATION = 4, 119 NVM_SECTION_TYPE_CALIBRATION = 4,
119 NVM_SECTION_TYPE_PRODUCTION = 5, 120 NVM_SECTION_TYPE_PRODUCTION = 5,
121 NVM_SECTION_TYPE_REGULATORY_SDP = 8,
120 NVM_SECTION_TYPE_MAC_OVERRIDE = 11, 122 NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
121 NVM_SECTION_TYPE_PHY_SKU = 12, 123 NVM_SECTION_TYPE_PHY_SKU = 12,
122 NVM_MAX_NUM_SECTIONS = 13, 124 NVM_MAX_NUM_SECTIONS = 13,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 6afc7a799892..f5dd7d83cd0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1086,7 +1086,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
1086 1086
1087 if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 1087 if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
1088 /* stop recording */ 1088 /* stop recording */
1089 iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100); 1089 iwl_fw_dbg_stop_recording(fwrt);
1090 1090
1091 iwl_fw_error_dump(fwrt); 1091 iwl_fw_error_dump(fwrt);
1092 1092
@@ -1104,10 +1104,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
1104 u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE); 1104 u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
1105 u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL); 1105 u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);
1106 1106
1107 /* stop recording */ 1107 iwl_fw_dbg_stop_recording(fwrt);
1108 iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
1109 udelay(100);
1110 iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
1111 /* wait before we collect the data till the DBGC stop */ 1108 /* wait before we collect the data till the DBGC stop */
1112 udelay(500); 1109 udelay(500);
1113 1110
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 0f810ea89d31..9c889a32fe24 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -68,6 +68,8 @@
68#include <linux/workqueue.h> 68#include <linux/workqueue.h>
69#include <net/cfg80211.h> 69#include <net/cfg80211.h>
70#include "runtime.h" 70#include "runtime.h"
71#include "iwl-prph.h"
72#include "iwl-io.h"
71#include "file.h" 73#include "file.h"
72#include "error-dump.h" 74#include "error-dump.h"
73 75
@@ -194,8 +196,21 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
194 iwl_fw_dbg_get_trigger((fwrt)->fw,\ 196 iwl_fw_dbg_get_trigger((fwrt)->fw,\
195 (trig))) 197 (trig)))
196 198
199static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
200{
201 if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
202 iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
203 } else {
204 iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
205 udelay(100);
206 iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
207 }
208}
209
197static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) 210static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
198{ 211{
212 iwl_fw_dbg_stop_recording(fwrt);
213
199 fwrt->dump.conf = FW_DBG_INVALID; 214 fwrt->dump.conf = FW_DBG_INVALID;
200} 215}
201 216
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 3e057b539d5b..71cb1ecde0f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -108,6 +108,18 @@ enum iwl_led_mode {
108 IWL_LED_DISABLE, 108 IWL_LED_DISABLE,
109}; 109};
110 110
111/**
112 * enum iwl_nvm_type - nvm formats
113 * @IWL_NVM: the regular format
114 * @IWL_NVM_EXT: extended NVM format
115 * @IWL_NVM_SDP: NVM format used by 3168 series
116 */
117enum iwl_nvm_type {
118 IWL_NVM,
119 IWL_NVM_EXT,
120 IWL_NVM_SDP,
121};
122
111/* 123/*
112 * This is the threshold value of plcp error rate per 100mSecs. It is 124 * This is the threshold value of plcp error rate per 100mSecs. It is
113 * used to set and check for the validity of plcp_delta. 125 * used to set and check for the validity of plcp_delta.
@@ -320,7 +332,7 @@ struct iwl_pwr_tx_backoff {
320 * @integrated: discrete or integrated 332 * @integrated: discrete or integrated
321 * @gen2: a000 and on transport operation 333 * @gen2: a000 and on transport operation
322 * @cdb: CDB support 334 * @cdb: CDB support
323 * @ext_nvm: extended NVM format 335 * @nvm_type: see &enum iwl_nvm_type
324 * 336 *
325 * We enable the driver to be backward compatible wrt. hardware features. 337 * We enable the driver to be backward compatible wrt. hardware features.
326 * API differences in uCode shouldn't be handled here but through TLVs 338 * API differences in uCode shouldn't be handled here but through TLVs
@@ -342,6 +354,7 @@ struct iwl_cfg {
342 const struct iwl_tt_params *thermal_params; 354 const struct iwl_tt_params *thermal_params;
343 enum iwl_device_family device_family; 355 enum iwl_device_family device_family;
344 enum iwl_led_mode led_mode; 356 enum iwl_led_mode led_mode;
357 enum iwl_nvm_type nvm_type;
345 u32 max_data_size; 358 u32 max_data_size;
346 u32 max_inst_size; 359 u32 max_inst_size;
347 netdev_features_t features; 360 netdev_features_t features;
@@ -369,7 +382,6 @@ struct iwl_cfg {
369 use_tfh:1, 382 use_tfh:1,
370 gen2:1, 383 gen2:1,
371 cdb:1, 384 cdb:1,
372 ext_nvm:1,
373 dbgc_supported:1; 385 dbgc_supported:1;
374 u8 valid_tx_ant; 386 u8 valid_tx_ant;
375 u8 valid_rx_ant; 387 u8 valid_rx_ant;
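The iwl-config.h change retires the single ext_nvm:1 flag in favour of enum iwl_nvm_type, because a third NVM layout (the 3168's SDP format) cannot be expressed by one bool; the follow-on hunks rewrite !cfg->ext_nvm as cfg->nvm_type != IWL_NVM_EXT and add SDP-specific branches. The same bool-to-enum migration in miniature (foo_* names are illustrative):

    #include <linux/types.h>

    enum foo_nvm_type {
            FOO_NVM,                        /* the regular format */
            FOO_NVM_EXT,                    /* extended format */
            FOO_NVM_SDP,                    /* the third format a bool could not express */
    };

    struct foo_cfg {
            enum foo_nvm_type nvm_type;     /* was: ext_nvm:1 */
    };

    static bool foo_uses_legacy_channel_table(const struct foo_cfg *cfg)
    {
            /* call sites now name the format they mean instead of negating a flag */
            return cfg->nvm_type != FOO_NVM_EXT;
    }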
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 3014beef4873..c3a5d8ccc95e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -77,7 +77,7 @@
77#include "iwl-csr.h" 77#include "iwl-csr.h"
78 78
79/* NVM offsets (in words) definitions */ 79/* NVM offsets (in words) definitions */
80enum wkp_nvm_offsets { 80enum nvm_offsets {
81 /* NVM HW-Section offset (in words) definitions */ 81 /* NVM HW-Section offset (in words) definitions */
82 SUBSYSTEM_ID = 0x0A, 82 SUBSYSTEM_ID = 0x0A,
83 HW_ADDR = 0x15, 83 HW_ADDR = 0x15,
@@ -92,7 +92,10 @@ enum wkp_nvm_offsets {
92 92
93 /* NVM calibration section offset (in words) definitions */ 93 /* NVM calibration section offset (in words) definitions */
94 NVM_CALIB_SECTION = 0x2B8, 94 NVM_CALIB_SECTION = 0x2B8,
95 XTAL_CALIB = 0x316 - NVM_CALIB_SECTION 95 XTAL_CALIB = 0x316 - NVM_CALIB_SECTION,
96
97 /* NVM REGULATORY -Section offset (in words) definitions */
98 NVM_CHANNELS_SDP = 0,
96}; 99};
97 100
98enum ext_nvm_offsets { 101enum ext_nvm_offsets {
@@ -206,8 +209,36 @@ enum iwl_nvm_channel_flags {
206 NVM_CHANNEL_DC_HIGH = BIT(12), 209 NVM_CHANNEL_DC_HIGH = BIT(12),
207}; 210};
208 211
212static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
213 int chan, u16 flags)
214{
209#define CHECK_AND_PRINT_I(x) \ 215#define CHECK_AND_PRINT_I(x) \
210 ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "") 216 ((flags & NVM_CHANNEL_##x) ? " " #x : "")
217
218 if (!(flags & NVM_CHANNEL_VALID)) {
219 IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n",
220 chan, flags);
221 return;
222 }
223
224 /* Note: already can print up to 101 characters, 110 is the limit! */
225 IWL_DEBUG_DEV(dev, level,
226 "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
227 chan, flags,
228 CHECK_AND_PRINT_I(VALID),
229 CHECK_AND_PRINT_I(IBSS),
230 CHECK_AND_PRINT_I(ACTIVE),
231 CHECK_AND_PRINT_I(RADAR),
232 CHECK_AND_PRINT_I(INDOOR_ONLY),
233 CHECK_AND_PRINT_I(GO_CONCURRENT),
234 CHECK_AND_PRINT_I(UNIFORM),
235 CHECK_AND_PRINT_I(20MHZ),
236 CHECK_AND_PRINT_I(40MHZ),
237 CHECK_AND_PRINT_I(80MHZ),
238 CHECK_AND_PRINT_I(160MHZ),
239 CHECK_AND_PRINT_I(DC_HIGH));
240#undef CHECK_AND_PRINT_I
241}
211 242
212static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, 243static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
213 u16 nvm_flags, const struct iwl_cfg *cfg) 244 u16 nvm_flags, const struct iwl_cfg *cfg)
@@ -215,7 +246,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
215 u32 flags = IEEE80211_CHAN_NO_HT40; 246 u32 flags = IEEE80211_CHAN_NO_HT40;
216 u32 last_5ghz_ht = LAST_5GHZ_HT; 247 u32 last_5ghz_ht = LAST_5GHZ_HT;
217 248
218 if (cfg->ext_nvm) 249 if (cfg->nvm_type == IWL_NVM_EXT)
219 last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; 250 last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
220 251
221 if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) { 252 if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
@@ -268,7 +299,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
268 int num_of_ch, num_2ghz_channels; 299 int num_of_ch, num_2ghz_channels;
269 const u8 *nvm_chan; 300 const u8 *nvm_chan;
270 301
271 if (!cfg->ext_nvm) { 302 if (cfg->nvm_type != IWL_NVM_EXT) {
272 num_of_ch = IWL_NUM_CHANNELS; 303 num_of_ch = IWL_NUM_CHANNELS;
273 nvm_chan = &iwl_nvm_channels[0]; 304 nvm_chan = &iwl_nvm_channels[0];
274 num_2ghz_channels = NUM_2GHZ_CHANNELS; 305 num_2ghz_channels = NUM_2GHZ_CHANNELS;
@@ -302,12 +333,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
302 * supported, hence we still want to add them to 333 * supported, hence we still want to add them to
303 * the list of supported channels to cfg80211. 334 * the list of supported channels to cfg80211.
304 */ 335 */
305 IWL_DEBUG_EEPROM(dev, 336 iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
306 "Ch. %d Flags %x [%sGHz] - No traffic\n", 337 nvm_chan[ch_idx], ch_flags);
307 nvm_chan[ch_idx],
308 ch_flags,
309 (ch_idx >= num_2ghz_channels) ?
310 "5.2" : "2.4");
311 continue; 338 continue;
312 } 339 }
313 340
@@ -337,27 +364,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
337 else 364 else
338 channel->flags = 0; 365 channel->flags = 0;
339 366
340 IWL_DEBUG_EEPROM(dev, 367 iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
341 "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n", 368 channel->hw_value, ch_flags);
342 channel->hw_value, 369 IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
343 is_5ghz ? "5.2" : "2.4", 370 channel->hw_value, channel->max_power);
344 ch_flags,
345 CHECK_AND_PRINT_I(VALID),
346 CHECK_AND_PRINT_I(IBSS),
347 CHECK_AND_PRINT_I(ACTIVE),
348 CHECK_AND_PRINT_I(RADAR),
349 CHECK_AND_PRINT_I(INDOOR_ONLY),
350 CHECK_AND_PRINT_I(GO_CONCURRENT),
351 CHECK_AND_PRINT_I(UNIFORM),
352 CHECK_AND_PRINT_I(20MHZ),
353 CHECK_AND_PRINT_I(40MHZ),
354 CHECK_AND_PRINT_I(80MHZ),
355 CHECK_AND_PRINT_I(160MHZ),
356 CHECK_AND_PRINT_I(DC_HIGH),
357 channel->max_power,
358 ((ch_flags & NVM_CHANNEL_IBSS) &&
359 !(ch_flags & NVM_CHANNEL_RADAR))
360 ? "" : "not ");
361 } 371 }
362 372
363 return n_channels; 373 return n_channels;
@@ -484,7 +494,7 @@ IWL_EXPORT_SYMBOL(iwl_init_sbands);
484static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, 494static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
485 const __le16 *phy_sku) 495 const __le16 *phy_sku)
486{ 496{
487 if (!cfg->ext_nvm) 497 if (cfg->nvm_type != IWL_NVM_EXT)
488 return le16_to_cpup(nvm_sw + SKU); 498 return le16_to_cpup(nvm_sw + SKU);
489 499
490 return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000)); 500 return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
@@ -492,7 +502,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
492 502
493static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw) 503static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
494{ 504{
495 if (!cfg->ext_nvm) 505 if (cfg->nvm_type != IWL_NVM_EXT)
496 return le16_to_cpup(nvm_sw + NVM_VERSION); 506 return le16_to_cpup(nvm_sw + NVM_VERSION);
497 else 507 else
498 return le32_to_cpup((__le32 *)(nvm_sw + 508 return le32_to_cpup((__le32 *)(nvm_sw +
@@ -502,7 +512,7 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
502static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, 512static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
503 const __le16 *phy_sku) 513 const __le16 *phy_sku)
504{ 514{
505 if (!cfg->ext_nvm) 515 if (cfg->nvm_type != IWL_NVM_EXT)
506 return le16_to_cpup(nvm_sw + RADIO_CFG); 516 return le16_to_cpup(nvm_sw + RADIO_CFG);
507 517
508 return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM)); 518 return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
@@ -513,7 +523,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
513{ 523{
514 int n_hw_addr; 524 int n_hw_addr;
515 525
516 if (!cfg->ext_nvm) 526 if (cfg->nvm_type != IWL_NVM_EXT)
517 return le16_to_cpup(nvm_sw + N_HW_ADDRS); 527 return le16_to_cpup(nvm_sw + N_HW_ADDRS);
518 528
519 n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000)); 529 n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
@@ -525,7 +535,7 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
525 struct iwl_nvm_data *data, 535 struct iwl_nvm_data *data,
526 u32 radio_cfg) 536 u32 radio_cfg)
527{ 537{
528 if (!cfg->ext_nvm) { 538 if (cfg->nvm_type != IWL_NVM_EXT) {
529 data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg); 539 data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
530 data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg); 540 data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
531 data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg); 541 data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
@@ -634,7 +644,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
634{ 644{
635 if (cfg->mac_addr_from_csr) { 645 if (cfg->mac_addr_from_csr) {
636 iwl_set_hw_address_from_csr(trans, data); 646 iwl_set_hw_address_from_csr(trans, data);
637 } else if (!cfg->ext_nvm) { 647 } else if (cfg->nvm_type != IWL_NVM_EXT) {
638 const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR); 648 const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
639 649
640 /* The byte order is little endian 16 bit, meaning 214365 */ 650 /* The byte order is little endian 16 bit, meaning 214365 */
@@ -706,7 +716,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
706 u16 lar_config; 716 u16 lar_config;
707 const __le16 *ch_section; 717 const __le16 *ch_section;
708 718
709 if (!cfg->ext_nvm) 719 if (cfg->nvm_type != IWL_NVM_EXT)
710 data = kzalloc(sizeof(*data) + 720 data = kzalloc(sizeof(*data) +
711 sizeof(struct ieee80211_channel) * 721 sizeof(struct ieee80211_channel) *
712 IWL_NUM_CHANNELS, 722 IWL_NUM_CHANNELS,
@@ -740,7 +750,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
740 750
741 data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); 751 data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
742 752
743 if (!cfg->ext_nvm) { 753 if (cfg->nvm_type != IWL_NVM_EXT) {
744 /* Checking for required sections */ 754 /* Checking for required sections */
745 if (!nvm_calib) { 755 if (!nvm_calib) {
746 IWL_ERR(trans, 756 IWL_ERR(trans,
@@ -748,11 +758,15 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
748 kfree(data); 758 kfree(data);
749 return NULL; 759 return NULL;
750 } 760 }
761
762 ch_section = cfg->nvm_type == IWL_NVM_SDP ?
763 &regulatory[NVM_CHANNELS_SDP] :
764 &nvm_sw[NVM_CHANNELS];
765
751 /* in family 8000 Xtal calibration values moved to OTP */ 766 /* in family 8000 Xtal calibration values moved to OTP */
752 data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); 767 data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
753 data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); 768 data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
754 lar_enabled = true; 769 lar_enabled = true;
755 ch_section = &nvm_sw[NVM_CHANNELS];
756 } else { 770 } else {
757 u16 lar_offset = data->nvm_version < 0xE39 ? 771 u16 lar_offset = data->nvm_version < 0xE39 ?
758 NVM_LAR_OFFSET_OLD : 772 NVM_LAR_OFFSET_OLD :
@@ -786,7 +800,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
786 u32 flags = NL80211_RRF_NO_HT40; 800 u32 flags = NL80211_RRF_NO_HT40;
787 u32 last_5ghz_ht = LAST_5GHZ_HT; 801 u32 last_5ghz_ht = LAST_5GHZ_HT;
788 802
789 if (cfg->ext_nvm) 803 if (cfg->nvm_type == IWL_NVM_EXT)
790 last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; 804 last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
791 805
792 if (ch_idx < NUM_2GHZ_CHANNELS && 806 if (ch_idx < NUM_2GHZ_CHANNELS &&
@@ -834,7 +848,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
834 int ch_idx; 848 int ch_idx;
835 u16 ch_flags; 849 u16 ch_flags;
836 u32 reg_rule_flags, prev_reg_rule_flags = 0; 850 u32 reg_rule_flags, prev_reg_rule_flags = 0;
837 const u8 *nvm_chan = cfg->ext_nvm ? 851 const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
838 iwl_ext_nvm_channels : iwl_nvm_channels; 852 iwl_ext_nvm_channels : iwl_nvm_channels;
839 struct ieee80211_regdomain *regd; 853 struct ieee80211_regdomain *regd;
840 int size_of_regd; 854 int size_of_regd;
@@ -843,7 +857,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
843 int center_freq, prev_center_freq = 0; 857 int center_freq, prev_center_freq = 0;
844 int valid_rules = 0; 858 int valid_rules = 0;
845 bool new_rule; 859 bool new_rule;
846 int max_num_ch = cfg->ext_nvm ? 860 int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
847 IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS; 861 IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS;
848 862
849 if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) 863 if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
@@ -873,12 +887,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
873 new_rule = false; 887 new_rule = false;
874 888
875 if (!(ch_flags & NVM_CHANNEL_VALID)) { 889 if (!(ch_flags & NVM_CHANNEL_VALID)) {
876 IWL_DEBUG_DEV(dev, IWL_DL_LAR, 890 iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
877 "Ch. %d Flags %x [%sGHz] - No traffic\n", 891 nvm_chan[ch_idx], ch_flags);
878 nvm_chan[ch_idx],
879 ch_flags,
880 (ch_idx >= NUM_2GHZ_CHANNELS) ?
881 "5.2" : "2.4");
882 continue; 892 continue;
883 } 893 }
884 894
@@ -914,31 +924,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
914 prev_center_freq = center_freq; 924 prev_center_freq = center_freq;
915 prev_reg_rule_flags = reg_rule_flags; 925 prev_reg_rule_flags = reg_rule_flags;
916 926
917 IWL_DEBUG_DEV(dev, IWL_DL_LAR, 927 iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
918 "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s%s%s%s(0x%02x)\n", 928 nvm_chan[ch_idx], ch_flags);
919 center_freq,
920 band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
921 CHECK_AND_PRINT_I(VALID),
922 CHECK_AND_PRINT_I(IBSS),
923 CHECK_AND_PRINT_I(ACTIVE),
924 CHECK_AND_PRINT_I(RADAR),
925 CHECK_AND_PRINT_I(INDOOR_ONLY),
926 CHECK_AND_PRINT_I(GO_CONCURRENT),
927 CHECK_AND_PRINT_I(UNIFORM),
928 CHECK_AND_PRINT_I(20MHZ),
929 CHECK_AND_PRINT_I(40MHZ),
930 CHECK_AND_PRINT_I(80MHZ),
931 CHECK_AND_PRINT_I(160MHZ),
932 CHECK_AND_PRINT_I(DC_HIGH),
933 ch_flags);
934 IWL_DEBUG_DEV(dev, IWL_DL_LAR,
935 "Ch. %d [%sGHz] reg_flags 0x%x: %s\n",
936 center_freq,
937 band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
938 reg_rule_flags,
939 ((ch_flags & NVM_CHANNEL_ACTIVE) &&
940 !(ch_flags & NVM_CHANNEL_RADAR))
941 ? "Ad-Hoc" : "");
942 } 929 }
943 930
944 regd->n_reg_rules = valid_rules; 931 regd->n_reg_rules = valid_rules;
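The hunks above replace several open-coded IWL_DEBUG_EEPROM/IWL_DEBUG_DEV format strings with a single iwl_nvm_print_channel_flags() call. The helper body is not part of this excerpt; the sketch below is only a guess at its shape, reconstructed from the removed call sites (the IWL_DEBUG_DEV call and the local CHECK_AND_PRINT_I macro are assumptions, not the driver's actual code).

/* Hedged sketch: decode NVM channel flags for debug output in one place,
 * instead of duplicating the format string at every call site. */
static void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
					int chan, u16 flags)
{
#define CHECK_AND_PRINT_I(x)	((flags & NVM_CHANNEL_##x) ? " " #x : "")

	IWL_DEBUG_DEV(dev, level,
		      "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
		      chan, flags,
		      CHECK_AND_PRINT_I(VALID),
		      CHECK_AND_PRINT_I(IBSS),
		      CHECK_AND_PRINT_I(ACTIVE),
		      CHECK_AND_PRINT_I(RADAR),
		      CHECK_AND_PRINT_I(INDOOR_ONLY),
		      CHECK_AND_PRINT_I(GO_CONCURRENT),
		      CHECK_AND_PRINT_I(UNIFORM),
		      CHECK_AND_PRINT_I(20MHZ),
		      CHECK_AND_PRINT_I(40MHZ),
		      CHECK_AND_PRINT_I(80MHZ),
		      CHECK_AND_PRINT_I(160MHZ),
		      CHECK_AND_PRINT_I(DC_HIGH));

#undef CHECK_AND_PRINT_I
}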
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 3bcaa82f59b2..a9ac872226fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1077,6 +1077,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1077 mvm->vif_count = 0; 1077 mvm->vif_count = 0;
1078 mvm->rx_ba_sessions = 0; 1078 mvm->rx_ba_sessions = 0;
1079 mvm->fwrt.dump.conf = FW_DBG_INVALID; 1079 mvm->fwrt.dump.conf = FW_DBG_INVALID;
1080 mvm->monitor_on = false;
1080 1081
1081 /* keep statistics ticking */ 1082 /* keep statistics ticking */
1082 iwl_mvm_accu_radio_stats(mvm); 1083 iwl_mvm_accu_radio_stats(mvm);
@@ -1437,6 +1438,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1437 mvm->p2p_device_vif = vif; 1438 mvm->p2p_device_vif = vif;
1438 } 1439 }
1439 1440
1441 if (vif->type == NL80211_IFTYPE_MONITOR)
1442 mvm->monitor_on = true;
1443
1440 iwl_mvm_vif_dbgfs_register(mvm, vif); 1444 iwl_mvm_vif_dbgfs_register(mvm, vif);
1441 goto out_unlock; 1445 goto out_unlock;
1442 1446
@@ -1526,6 +1530,9 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1526 iwl_mvm_power_update_mac(mvm); 1530 iwl_mvm_power_update_mac(mvm);
1527 iwl_mvm_mac_ctxt_remove(mvm, vif); 1531 iwl_mvm_mac_ctxt_remove(mvm, vif);
1528 1532
1533 if (vif->type == NL80211_IFTYPE_MONITOR)
1534 mvm->monitor_on = false;
1535
1529out_release: 1536out_release:
1530 mutex_unlock(&mvm->mutex); 1537 mutex_unlock(&mvm->mutex);
1531} 1538}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 83303bac0e4b..949e63418299 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1015,6 +1015,9 @@ struct iwl_mvm {
1015 bool drop_bcn_ap_mode; 1015 bool drop_bcn_ap_mode;
1016 1016
1017 struct delayed_work cs_tx_unblock_dwork; 1017 struct delayed_work cs_tx_unblock_dwork;
1018
1019 /* does a monitor vif exist (only one can exist hence bool) */
1020 bool monitor_on;
1018#ifdef CONFIG_ACPI 1021#ifdef CONFIG_ACPI
1019 struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM]; 1022 struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM];
1020 struct iwl_mvm_geo_profile geo_profiles[IWL_NUM_GEO_PROFILES]; 1023 struct iwl_mvm_geo_profile geo_profiles[IWL_NUM_GEO_PROFILES];
@@ -1159,7 +1162,7 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
1159 * Enable LAR only if it is supported by the FW (TLV) && 1162 * Enable LAR only if it is supported by the FW (TLV) &&
1160 * enabled in the NVM 1163 * enabled in the NVM
1161 */ 1164 */
1162 if (mvm->cfg->ext_nvm) 1165 if (mvm->cfg->nvm_type == IWL_NVM_EXT)
1163 return nvm_lar && tlv_lar; 1166 return nvm_lar && tlv_lar;
1164 else 1167 else
1165 return tlv_lar; 1168 return tlv_lar;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 422aa6be9932..fb25b6f29323 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -295,18 +295,24 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
295 const __be16 *hw; 295 const __be16 *hw;
296 const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku; 296 const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
297 bool lar_enabled; 297 bool lar_enabled;
298 int regulatory_type;
298 299
299 /* Checking for required sections */ 300 /* Checking for required sections */
300 if (!mvm->trans->cfg->ext_nvm) { 301 if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
301 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || 302 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
302 !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { 303 !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
303 IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); 304 IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
304 return NULL; 305 return NULL;
305 } 306 }
306 } else { 307 } else {
308 if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP)
309 regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP;
310 else
311 regulatory_type = NVM_SECTION_TYPE_REGULATORY;
312
307 /* SW and REGULATORY sections are mandatory */ 313 /* SW and REGULATORY sections are mandatory */
308 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || 314 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
309 !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) { 315 !mvm->nvm_sections[regulatory_type].data) {
310 IWL_ERR(mvm, 316 IWL_ERR(mvm,
311 "Can't parse empty family 8000 OTP/NVM sections\n"); 317 "Can't parse empty family 8000 OTP/NVM sections\n");
312 return NULL; 318 return NULL;
@@ -330,11 +336,14 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
330 hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data; 336 hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data;
331 sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; 337 sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
332 calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; 338 calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
333 regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
334 mac_override = 339 mac_override =
335 (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data; 340 (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
336 phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data; 341 phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
337 342
343 regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ?
344 (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
345 (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
346
338 lar_enabled = !iwlwifi_mod_params.lar_disable && 347 lar_enabled = !iwlwifi_mod_params.lar_disable &&
339 fw_has_capa(&mvm->fw->ucode_capa, 348 fw_has_capa(&mvm->fw->ucode_capa,
340 IWL_UCODE_TLV_CAPA_LAR_SUPPORT); 349 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
@@ -394,7 +403,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
394 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n"); 403 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
395 404
396 /* Maximal size depends on NVM version */ 405 /* Maximal size depends on NVM version */
397 if (!mvm->trans->cfg->ext_nvm) 406 if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT)
398 max_section_size = IWL_MAX_NVM_SECTION_SIZE; 407 max_section_size = IWL_MAX_NVM_SECTION_SIZE;
399 else 408 else
400 max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE; 409 max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
@@ -465,7 +474,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
465 break; 474 break;
466 } 475 }
467 476
468 if (!mvm->trans->cfg->ext_nvm) { 477 if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
469 section_size = 478 section_size =
470 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); 479 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
471 section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); 480 section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
@@ -740,7 +749,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
740 struct ieee80211_regdomain *regd; 749 struct ieee80211_regdomain *regd;
741 char mcc[3]; 750 char mcc[3];
742 751
743 if (mvm->cfg->ext_nvm) { 752 if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
744 tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, 753 tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
745 IWL_UCODE_TLV_CAPA_LAR_SUPPORT); 754 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
746 nvm_lar = mvm->nvm_data->lar_enabled; 755 nvm_lar = mvm->nvm_data->lar_enabled;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 184c749766f2..2d14a58cbdd7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -244,7 +244,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
244 return 0; 244 return 0;
245 245
246 default: 246 default:
247 IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status); 247 /* Expected in monitor (not having the keys) */
248 if (!mvm->monitor_on)
249 IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
248 } 250 }
249 251
250 return 0; 252 return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 77f77bc5d083..248699c2c4bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -277,7 +277,9 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
277 stats->flag |= RX_FLAG_DECRYPTED; 277 stats->flag |= RX_FLAG_DECRYPTED;
278 return 0; 278 return 0;
279 default: 279 default:
280 IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status); 280 /* Expected in monitor (not having the keys) */
281 if (!mvm->monitor_on)
282 IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
281 } 283 }
282 284
283 return 0; 285 return 0;
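Taken together, the mac80211.c, mvm.h, rx.c and rxmq.c hunks add one mvm->monitor_on flag so the driver stops logging "Unhandled alg" for frames received on a monitor interface, where it legitimately has no keys. A compressed sketch of the pattern (the function names here are hypothetical; the real hooks are the add/remove-interface and RX-crypto paths shown above):

/* Sketch only: remember that a (single) monitor vif exists and use it to
 * silence decryption-status errors that are expected while sniffing. */
static void example_iface_added(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	if (vif->type == NL80211_IFTYPE_MONITOR)
		mvm->monitor_on = true;
}

static void example_rx_unhandled_alg(struct iwl_mvm *mvm, u32 status)
{
	/* Expected in monitor mode (no keys installed) */
	if (!mvm->monitor_on)
		IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
}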
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 4d907f60bce9..1232f63278eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -631,7 +631,7 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
631 631
632 if (!iwl_mvm_firmware_running(mvm) || 632 if (!iwl_mvm_firmware_running(mvm) ||
633 mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { 633 mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
634 ret = -EIO; 634 ret = -ENODATA;
635 goto out; 635 goto out;
636 } 636 }
637 637
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 4f73012978e9..1d431d4bf6d2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1122,7 +1122,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
1122 } 1122 }
1123 if (0 == tmp) { 1123 if (0 == tmp) {
1124 read_addr = REG_DBI_RDATA + addr % 4; 1124 read_addr = REG_DBI_RDATA + addr % 4;
1125 ret = rtl_read_byte(rtlpriv, read_addr); 1125 ret = rtl_read_word(rtlpriv, read_addr);
1126 } 1126 }
1127 return ret; 1127 return ret;
1128} 1128}
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index ee8ed9da00ad..4491ca5aee90 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -486,7 +486,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
486 486
487 dev->tx_queue_len = XENVIF_QUEUE_LENGTH; 487 dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
488 488
489 dev->min_mtu = 0; 489 dev->min_mtu = ETH_MIN_MTU;
490 dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; 490 dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
491 491
492 /* 492 /*
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 523387e71a80..8b8689c6d887 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1316,7 +1316,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1316 netdev->features |= netdev->hw_features; 1316 netdev->features |= netdev->hw_features;
1317 1317
1318 netdev->ethtool_ops = &xennet_ethtool_ops; 1318 netdev->ethtool_ops = &xennet_ethtool_ops;
1319 netdev->min_mtu = 0; 1319 netdev->min_mtu = ETH_MIN_MTU;
1320 netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE; 1320 netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1321 SET_NETDEV_DEV(netdev, &dev->dev); 1321 SET_NETDEV_DEV(netdev, &dev->dev);
1322 1322
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index d94dd8b77abd..98258583abb0 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -44,7 +44,7 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id)
44 return -EINVAL; 44 return -EINVAL;
45} 45}
46 46
47static void of_mdiobus_register_phy(struct mii_bus *mdio, 47static int of_mdiobus_register_phy(struct mii_bus *mdio,
48 struct device_node *child, u32 addr) 48 struct device_node *child, u32 addr)
49{ 49{
50 struct phy_device *phy; 50 struct phy_device *phy;
@@ -60,9 +60,13 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
60 else 60 else
61 phy = get_phy_device(mdio, addr, is_c45); 61 phy = get_phy_device(mdio, addr, is_c45);
62 if (IS_ERR(phy)) 62 if (IS_ERR(phy))
63 return; 63 return PTR_ERR(phy);
64 64
65 rc = irq_of_parse_and_map(child, 0); 65 rc = of_irq_get(child, 0);
66 if (rc == -EPROBE_DEFER) {
67 phy_device_free(phy);
68 return rc;
69 }
66 if (rc > 0) { 70 if (rc > 0) {
67 phy->irq = rc; 71 phy->irq = rc;
68 mdio->irq[addr] = rc; 72 mdio->irq[addr] = rc;
@@ -84,22 +88,23 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
84 if (rc) { 88 if (rc) {
85 phy_device_free(phy); 89 phy_device_free(phy);
86 of_node_put(child); 90 of_node_put(child);
87 return; 91 return rc;
88 } 92 }
89 93
90 dev_dbg(&mdio->dev, "registered phy %s at address %i\n", 94 dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
91 child->name, addr); 95 child->name, addr);
96 return 0;
92} 97}
93 98
94static void of_mdiobus_register_device(struct mii_bus *mdio, 99static int of_mdiobus_register_device(struct mii_bus *mdio,
95 struct device_node *child, u32 addr) 100 struct device_node *child, u32 addr)
96{ 101{
97 struct mdio_device *mdiodev; 102 struct mdio_device *mdiodev;
98 int rc; 103 int rc;
99 104
100 mdiodev = mdio_device_create(mdio, addr); 105 mdiodev = mdio_device_create(mdio, addr);
101 if (IS_ERR(mdiodev)) 106 if (IS_ERR(mdiodev))
102 return; 107 return PTR_ERR(mdiodev);
103 108
104 /* Associate the OF node with the device structure so it 109 /* Associate the OF node with the device structure so it
105 * can be looked up later. 110 * can be looked up later.
@@ -112,11 +117,12 @@ static void of_mdiobus_register_device(struct mii_bus *mdio,
112 if (rc) { 117 if (rc) {
113 mdio_device_free(mdiodev); 118 mdio_device_free(mdiodev);
114 of_node_put(child); 119 of_node_put(child);
115 return; 120 return rc;
116 } 121 }
117 122
118 dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n", 123 dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
119 child->name, addr); 124 child->name, addr);
125 return 0;
120} 126}
121 127
122/* The following is a list of PHY compatible strings which appear in 128/* The following is a list of PHY compatible strings which appear in
@@ -219,9 +225,11 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
219 } 225 }
220 226
221 if (of_mdiobus_child_is_phy(child)) 227 if (of_mdiobus_child_is_phy(child))
222 of_mdiobus_register_phy(mdio, child, addr); 228 rc = of_mdiobus_register_phy(mdio, child, addr);
223 else 229 else
224 of_mdiobus_register_device(mdio, child, addr); 230 rc = of_mdiobus_register_device(mdio, child, addr);
231 if (rc)
232 goto unregister;
225 } 233 }
226 234
227 if (!scanphys) 235 if (!scanphys)
@@ -242,12 +250,19 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
242 dev_info(&mdio->dev, "scan phy %s at address %i\n", 250 dev_info(&mdio->dev, "scan phy %s at address %i\n",
243 child->name, addr); 251 child->name, addr);
244 252
245 if (of_mdiobus_child_is_phy(child)) 253 if (of_mdiobus_child_is_phy(child)) {
246 of_mdiobus_register_phy(mdio, child, addr); 254 rc = of_mdiobus_register_phy(mdio, child, addr);
255 if (rc)
256 goto unregister;
257 }
247 } 258 }
248 } 259 }
249 260
250 return 0; 261 return 0;
262
263unregister:
264 mdiobus_unregister(mdio);
265 return rc;
251} 266}
252EXPORT_SYMBOL(of_mdiobus_register); 267EXPORT_SYMBOL(of_mdiobus_register);
253 268
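With of_mdiobus_register_phy() and of_mdiobus_register_device() now returning an error, and of_irq_get() letting -EPROBE_DEFER propagate, of_mdiobus_register() unwinds with mdiobus_unregister() and hands the error to its caller. A hedged sketch of what a bus driver's probe can then do (example_mdio_probe and the elided bus setup are placeholders, not code from this patch):

/* Sketch: propagate -EPROBE_DEFER (e.g. a PHY interrupt controller that is
 * not probed yet) instead of silently registering the PHY without an IRQ. */
static int example_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;
	int ret;

	bus = devm_mdiobus_alloc(&pdev->dev);
	if (!bus)
		return -ENOMEM;

	/* bus->name, bus->read, bus->write, bus->parent set up here */

	ret = of_mdiobus_register(bus, pdev->dev.of_node);
	if (ret)		/* may now be -EPROBE_DEFER; just hand it up */
		return ret;

	platform_set_drvdata(pdev, bus);
	return 0;
}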
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index c60904ff40b8..3907bbc9c6cf 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -40,8 +40,9 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
40 struct socfpga_reset_data *data = container_of(rcdev, 40 struct socfpga_reset_data *data = container_of(rcdev,
41 struct socfpga_reset_data, 41 struct socfpga_reset_data,
42 rcdev); 42 rcdev);
43 int bank = id / BITS_PER_LONG; 43 int reg_width = sizeof(u32);
44 int offset = id % BITS_PER_LONG; 44 int bank = id / (reg_width * BITS_PER_BYTE);
45 int offset = id % (reg_width * BITS_PER_BYTE);
45 unsigned long flags; 46 unsigned long flags;
46 u32 reg; 47 u32 reg;
47 48
@@ -61,8 +62,9 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
61 struct socfpga_reset_data, 62 struct socfpga_reset_data,
62 rcdev); 63 rcdev);
63 64
64 int bank = id / BITS_PER_LONG; 65 int reg_width = sizeof(u32);
65 int offset = id % BITS_PER_LONG; 66 int bank = id / (reg_width * BITS_PER_BYTE);
67 int offset = id % (reg_width * BITS_PER_BYTE);
66 unsigned long flags; 68 unsigned long flags;
67 u32 reg; 69 u32 reg;
68 70
@@ -81,8 +83,9 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev,
81{ 83{
82 struct socfpga_reset_data *data = container_of(rcdev, 84 struct socfpga_reset_data *data = container_of(rcdev,
83 struct socfpga_reset_data, rcdev); 85 struct socfpga_reset_data, rcdev);
84 int bank = id / BITS_PER_LONG; 86 int reg_width = sizeof(u32);
85 int offset = id % BITS_PER_LONG; 87 int bank = id / (reg_width * BITS_PER_BYTE);
88 int offset = id % (reg_width * BITS_PER_BYTE);
86 u32 reg; 89 u32 reg;
87 90
88 reg = readl(data->membase + (bank * BANK_INCREMENT)); 91 reg = readl(data->membase + (bank * BANK_INCREMENT));
@@ -132,7 +135,7 @@ static int socfpga_reset_probe(struct platform_device *pdev)
132 spin_lock_init(&data->lock); 135 spin_lock_init(&data->lock);
133 136
134 data->rcdev.owner = THIS_MODULE; 137 data->rcdev.owner = THIS_MODULE;
135 data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG; 138 data->rcdev.nr_resets = NR_BANKS * (sizeof(u32) * BITS_PER_BYTE);
136 data->rcdev.ops = &socfpga_reset_ops; 139 data->rcdev.ops = &socfpga_reset_ops;
137 data->rcdev.of_node = pdev->dev.of_node; 140 data->rcdev.of_node = pdev->dev.of_node;
138 141
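A quick worked example of the change above: the SoCFPGA reset registers are 32 bits wide, so splitting the reset id with BITS_PER_LONG gives wrong bank/offset values on 64-bit kernels. The standalone snippet below only prints the two decompositions for reset line 40 (illustration, not driver code):

#include <stdio.h>

int main(void)
{
	int id = 40;
	int reg_bits = 4 * 8;	/* sizeof(u32) * BITS_PER_BYTE = 32 */

	/* old: BITS_PER_LONG == 64 -> bank 0, offset 40 (past a 32-bit register) */
	printf("old: bank=%d offset=%d\n", id / 64, id % 64);
	/* new: 32-bit register width -> bank 1, offset 8 */
	printf("new: bank=%d offset=%d\n", id / reg_bits, id % reg_bits);
	return 0;
}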
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 520325867e2b..31d31aad3de1 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -383,11 +383,11 @@ static void fc_rport_work(struct work_struct *work)
383 fc_rport_enter_flogi(rdata); 383 fc_rport_enter_flogi(rdata);
384 mutex_unlock(&rdata->rp_mutex); 384 mutex_unlock(&rdata->rp_mutex);
385 } else { 385 } else {
386 mutex_unlock(&rdata->rp_mutex);
386 FC_RPORT_DBG(rdata, "work delete\n"); 387 FC_RPORT_DBG(rdata, "work delete\n");
387 mutex_lock(&lport->disc.disc_mutex); 388 mutex_lock(&lport->disc.disc_mutex);
388 list_del_rcu(&rdata->peers); 389 list_del_rcu(&rdata->peers);
389 mutex_unlock(&lport->disc.disc_mutex); 390 mutex_unlock(&lport->disc.disc_mutex);
390 mutex_unlock(&rdata->rp_mutex);
391 kref_put(&rdata->kref, fc_rport_destroy); 391 kref_put(&rdata->kref, fc_rport_destroy);
392 } 392 }
393 } else { 393 } else {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c62e8d111fd9..f8dc1601efd5 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1728,7 +1728,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1728 1728
1729 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { 1729 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1730 reason = FAILURE_SESSION_IN_RECOVERY; 1730 reason = FAILURE_SESSION_IN_RECOVERY;
1731 sc->result = DID_REQUEUE; 1731 sc->result = DID_REQUEUE << 16;
1732 goto fault; 1732 goto fault;
1733 } 1733 }
1734 1734
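The libiscsi fix above shifts DID_REQUEUE into the host-byte position of the SCSI result word; the midlayer keeps the host byte in bits 16-23, so an unshifted DID_* value lands in the status byte and is misread. A tiny standalone illustration (the DID_REQUEUE value 0x0d matches include/scsi/scsi.h, but treat the snippet as explanatory only):

#include <stdio.h>

#define DID_REQUEUE 0x0d	/* host byte: requeue the command */

int main(void)
{
	unsigned int result = DID_REQUEUE << 16;

	/* prints result=0x000d0000 host_byte=0x0d */
	printf("result=0x%08x host_byte=0x%02x\n",
	       result, (result >> 16) & 0xff);
	return 0;
}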
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5b2437a5ea44..937209805baf 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3175,6 +3175,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3175 host->can_queue, base_vha->req, 3175 host->can_queue, base_vha->req,
3176 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 3176 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
3177 3177
3178 INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
3179
3178 if (ha->mqenable) { 3180 if (ha->mqenable) {
3179 bool mq = false; 3181 bool mq = false;
3180 bool startit = false; 3182 bool startit = false;
@@ -3223,7 +3225,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3223 */ 3225 */
3224 qla2xxx_wake_dpc(base_vha); 3226 qla2xxx_wake_dpc(base_vha);
3225 3227
3226 INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
3227 INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); 3228 INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
3228 3229
3229 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { 3230 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index bf53356f41f0..f796bd61f3f0 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1376,13 +1376,19 @@ static void __scsi_remove_target(struct scsi_target *starget)
1376 spin_lock_irqsave(shost->host_lock, flags); 1376 spin_lock_irqsave(shost->host_lock, flags);
1377 restart: 1377 restart:
1378 list_for_each_entry(sdev, &shost->__devices, siblings) { 1378 list_for_each_entry(sdev, &shost->__devices, siblings) {
1379 /*
1380 * We cannot call scsi_device_get() here, as
1381 * we might've been called from rmmod() causing
1382 * scsi_device_get() to fail the module_is_live()
1383 * check.
1384 */
1379 if (sdev->channel != starget->channel || 1385 if (sdev->channel != starget->channel ||
1380 sdev->id != starget->id || 1386 sdev->id != starget->id ||
1381 scsi_device_get(sdev)) 1387 !get_device(&sdev->sdev_gendev))
1382 continue; 1388 continue;
1383 spin_unlock_irqrestore(shost->host_lock, flags); 1389 spin_unlock_irqrestore(shost->host_lock, flags);
1384 scsi_remove_device(sdev); 1390 scsi_remove_device(sdev);
1385 scsi_device_put(sdev); 1391 put_device(&sdev->sdev_gendev);
1386 spin_lock_irqsave(shost->host_lock, flags); 1392 spin_lock_irqsave(shost->host_lock, flags);
1387 goto restart; 1393 goto restart;
1388 } 1394 }
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index cbd4495d0ff9..8c46a6d536af 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3320,6 +3320,9 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
3320{ 3320{
3321 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 3321 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
3322 3322
3323 if (WARN_ON_ONCE(!rport))
3324 return FAST_IO_FAIL;
3325
3323 return fc_block_rport(rport); 3326 return fc_block_rport(rport);
3324} 3327}
3325EXPORT_SYMBOL(fc_block_scsi_eh); 3328EXPORT_SYMBOL(fc_block_scsi_eh);
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index d96f4512224f..b55e5ebba8b4 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -400,10 +400,10 @@ static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
400 struct media_link, list); 400 struct media_link, list);
401 ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source); 401 ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
402 if (ret) 402 if (ret)
403 break; 403 return ret;
404 } 404 }
405 405
406 return ret; 406 return 0;
407} 407}
408 408
409/* async subdev complete notifier */ 409/* async subdev complete notifier */
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 35a128acfbd1..161694b66038 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1135,7 +1135,7 @@ static int btrfs_fill_super(struct super_block *sb,
1135#ifdef CONFIG_BTRFS_FS_POSIX_ACL 1135#ifdef CONFIG_BTRFS_FS_POSIX_ACL
1136 sb->s_flags |= MS_POSIXACL; 1136 sb->s_flags |= MS_POSIXACL;
1137#endif 1137#endif
1138 sb->s_flags |= MS_I_VERSION; 1138 sb->s_flags |= SB_I_VERSION;
1139 sb->s_iflags |= SB_I_CGROUPWB; 1139 sb->s_iflags |= SB_I_CGROUPWB;
1140 1140
1141 err = super_setup_bdi(sb); 1141 err = super_setup_bdi(sb);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 018c588c7ac3..8e704d12a1cf 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -109,6 +109,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
109 goto out; 109 goto out;
110 } 110 }
111 ukp = user_key_payload_locked(keyring_key); 111 ukp = user_key_payload_locked(keyring_key);
112 if (!ukp) {
113 /* key was revoked before we acquired its semaphore */
114 res = -EKEYREVOKED;
115 goto out;
116 }
112 if (ukp->datalen != sizeof(struct fscrypt_key)) { 117 if (ukp->datalen != sizeof(struct fscrypt_key)) {
113 res = -EINVAL; 118 res = -EINVAL;
114 goto out; 119 goto out;
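The fscrypt hunk above, like the ecryptfs and fscache hunks later in this diff, handles keys revoked after the lookup: user_key_payload_locked() (and the _rcu variant) return NULL once a key is revoked, so the payload must be checked before use. A hedged sketch of the general pattern (example_read_user_key is a made-up helper, not part of the patch):

/* Sketch: always test the payload pointer while holding key->sem, since a
 * concurrent revocation clears it and leaves only -EKEYREVOKED to report. */
static int example_read_user_key(struct key *key, void *dst, size_t len)
{
	const struct user_key_payload *ukp;
	int err = 0;

	down_read(&key->sem);
	ukp = user_key_payload_locked(key);
	if (!ukp)
		err = -EKEYREVOKED;	/* revoked before we took the semaphore */
	else if (ukp->datalen < len)
		err = -EINVAL;
	else
		memcpy(dst, ukp->data, len);
	up_read(&key->sem);

	return err;
}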
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 96415c65bbdc..b53e66d9abd7 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -45,6 +45,12 @@
45#define DIO_PAGES 64 45#define DIO_PAGES 64
46 46
47/* 47/*
48 * Flags for dio_complete()
49 */
50#define DIO_COMPLETE_ASYNC 0x01 /* This is async IO */
51#define DIO_COMPLETE_INVALIDATE 0x02 /* Can invalidate pages */
52
53/*
48 * This code generally works in units of "dio_blocks". A dio_block is 54 * This code generally works in units of "dio_blocks". A dio_block is
49 * somewhere between the hard sector size and the filesystem block size. it 55 * somewhere between the hard sector size and the filesystem block size. it
50 * is determined on a per-invocation basis. When talking to the filesystem 56 * is determined on a per-invocation basis. When talking to the filesystem
@@ -225,7 +231,7 @@ static inline struct page *dio_get_page(struct dio *dio,
225 * filesystems can use it to hold additional state between get_block calls and 231 * filesystems can use it to hold additional state between get_block calls and
226 * dio_complete. 232 * dio_complete.
227 */ 233 */
228static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) 234static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
229{ 235{
230 loff_t offset = dio->iocb->ki_pos; 236 loff_t offset = dio->iocb->ki_pos;
231 ssize_t transferred = 0; 237 ssize_t transferred = 0;
@@ -259,14 +265,27 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
259 if (ret == 0) 265 if (ret == 0)
260 ret = transferred; 266 ret = transferred;
261 267
268 if (dio->end_io) {
269 // XXX: ki_pos??
270 err = dio->end_io(dio->iocb, offset, ret, dio->private);
271 if (err)
272 ret = err;
273 }
274
262 /* 275 /*
263 * Try again to invalidate clean pages which might have been cached by 276 * Try again to invalidate clean pages which might have been cached by
264 * non-direct readahead, or faulted in by get_user_pages() if the source 277 * non-direct readahead, or faulted in by get_user_pages() if the source
265 * of the write was an mmap'ed region of the file we're writing. Either 278 * of the write was an mmap'ed region of the file we're writing. Either
266 * one is a pretty crazy thing to do, so we don't support it 100%. If 279 * one is a pretty crazy thing to do, so we don't support it 100%. If
267 * this invalidation fails, tough, the write still worked... 280 * this invalidation fails, tough, the write still worked...
281 *
282 * And this page cache invalidation has to be after dio->end_io(), as
283 * some filesystems convert unwritten extents to real allocations in
284 * end_io() when necessary, otherwise a racing buffer read would cache
285 * zeros from unwritten extents.
268 */ 286 */
269 if (ret > 0 && dio->op == REQ_OP_WRITE && 287 if (flags & DIO_COMPLETE_INVALIDATE &&
288 ret > 0 && dio->op == REQ_OP_WRITE &&
270 dio->inode->i_mapping->nrpages) { 289 dio->inode->i_mapping->nrpages) {
271 err = invalidate_inode_pages2_range(dio->inode->i_mapping, 290 err = invalidate_inode_pages2_range(dio->inode->i_mapping,
272 offset >> PAGE_SHIFT, 291 offset >> PAGE_SHIFT,
@@ -274,18 +293,10 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
274 WARN_ON_ONCE(err); 293 WARN_ON_ONCE(err);
275 } 294 }
276 295
277 if (dio->end_io) {
278
279 // XXX: ki_pos??
280 err = dio->end_io(dio->iocb, offset, ret, dio->private);
281 if (err)
282 ret = err;
283 }
284
285 if (!(dio->flags & DIO_SKIP_DIO_COUNT)) 296 if (!(dio->flags & DIO_SKIP_DIO_COUNT))
286 inode_dio_end(dio->inode); 297 inode_dio_end(dio->inode);
287 298
288 if (is_async) { 299 if (flags & DIO_COMPLETE_ASYNC) {
289 /* 300 /*
290 * generic_write_sync expects ki_pos to have been updated 301 * generic_write_sync expects ki_pos to have been updated
291 * already, but the submission path only does this for 302 * already, but the submission path only does this for
@@ -306,7 +317,7 @@ static void dio_aio_complete_work(struct work_struct *work)
306{ 317{
307 struct dio *dio = container_of(work, struct dio, complete_work); 318 struct dio *dio = container_of(work, struct dio, complete_work);
308 319
309 dio_complete(dio, 0, true); 320 dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
310} 321}
311 322
312static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio); 323static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
@@ -348,7 +359,7 @@ static void dio_bio_end_aio(struct bio *bio)
348 queue_work(dio->inode->i_sb->s_dio_done_wq, 359 queue_work(dio->inode->i_sb->s_dio_done_wq,
349 &dio->complete_work); 360 &dio->complete_work);
350 } else { 361 } else {
351 dio_complete(dio, 0, true); 362 dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
352 } 363 }
353 } 364 }
354} 365}
@@ -1360,7 +1371,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1360 dio_await_completion(dio); 1371 dio_await_completion(dio);
1361 1372
1362 if (drop_refcount(dio) == 0) { 1373 if (drop_refcount(dio) == 0) {
1363 retval = dio_complete(dio, retval, false); 1374 retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
1364 } else 1375 } else
1365 BUG_ON(retval != -EIOCBQUEUED); 1376 BUG_ON(retval != -EIOCBQUEUED);
1366 1377
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 9c351bf757b2..3fbc0ff79699 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -84,11 +84,16 @@ struct ecryptfs_page_crypt_context {
84static inline struct ecryptfs_auth_tok * 84static inline struct ecryptfs_auth_tok *
85ecryptfs_get_encrypted_key_payload_data(struct key *key) 85ecryptfs_get_encrypted_key_payload_data(struct key *key)
86{ 86{
87 if (key->type == &key_type_encrypted) 87 struct encrypted_key_payload *payload;
88 return (struct ecryptfs_auth_tok *) 88
89 (&((struct encrypted_key_payload *)key->payload.data[0])->payload_data); 89 if (key->type != &key_type_encrypted)
90 else
91 return NULL; 90 return NULL;
91
92 payload = key->payload.data[0];
93 if (!payload)
94 return ERR_PTR(-EKEYREVOKED);
95
96 return (struct ecryptfs_auth_tok *)payload->payload_data;
92} 97}
93 98
94static inline struct key *ecryptfs_get_encrypted_key(char *sig) 99static inline struct key *ecryptfs_get_encrypted_key(char *sig)
@@ -114,12 +119,17 @@ static inline struct ecryptfs_auth_tok *
114ecryptfs_get_key_payload_data(struct key *key) 119ecryptfs_get_key_payload_data(struct key *key)
115{ 120{
116 struct ecryptfs_auth_tok *auth_tok; 121 struct ecryptfs_auth_tok *auth_tok;
122 struct user_key_payload *ukp;
117 123
118 auth_tok = ecryptfs_get_encrypted_key_payload_data(key); 124 auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
119 if (!auth_tok) 125 if (auth_tok)
120 return (struct ecryptfs_auth_tok *)user_key_payload_locked(key)->data;
121 else
122 return auth_tok; 126 return auth_tok;
127
128 ukp = user_key_payload_locked(key);
129 if (!ukp)
130 return ERR_PTR(-EKEYREVOKED);
131
132 return (struct ecryptfs_auth_tok *)ukp->data;
123} 133}
124 134
125#define ECRYPTFS_MAX_KEYSET_SIZE 1024 135#define ECRYPTFS_MAX_KEYSET_SIZE 1024
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 3cf1546dca82..fa218cd64f74 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -459,7 +459,8 @@ out:
459 * @auth_tok_key: key containing the authentication token 459 * @auth_tok_key: key containing the authentication token
460 * @auth_tok: authentication token 460 * @auth_tok: authentication token
461 * 461 *
462 * Returns zero on valid auth tok; -EINVAL otherwise 462 * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
463 * -EKEYREVOKED if the key was revoked before we acquired its semaphore.
463 */ 464 */
464static int 465static int
465ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key, 466ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
@@ -468,6 +469,12 @@ ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
468 int rc = 0; 469 int rc = 0;
469 470
470 (*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key); 471 (*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
472 if (IS_ERR(*auth_tok)) {
473 rc = PTR_ERR(*auth_tok);
474 *auth_tok = NULL;
475 goto out;
476 }
477
471 if (ecryptfs_verify_version((*auth_tok)->version)) { 478 if (ecryptfs_verify_version((*auth_tok)->version)) {
472 printk(KERN_ERR "Data structure version mismatch. Userspace " 479 printk(KERN_ERR "Data structure version mismatch. Userspace "
473 "tools must match eCryptfs kernel module with major " 480 "tools must match eCryptfs kernel module with major "
diff --git a/fs/exec.c b/fs/exec.c
index 5470d3c1892a..3e14ba25f678 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1802,6 +1802,7 @@ static int do_execveat_common(int fd, struct filename *filename,
1802 /* execve succeeded */ 1802 /* execve succeeded */
1803 current->fs->in_exec = 0; 1803 current->fs->in_exec = 0;
1804 current->in_execve = 0; 1804 current->in_execve = 0;
1805 membarrier_execve(current);
1805 acct_update_integrals(current); 1806 acct_update_integrals(current);
1806 task_numa_free(current); 1807 task_numa_free(current);
1807 free_bprm(bprm); 1808 free_bprm(bprm);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b104096fce9e..b0915b734a38 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1677,7 +1677,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
1677 sbi->s_mount_flags |= EXT4_MF_FS_ABORTED; 1677 sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
1678 return 1; 1678 return 1;
1679 case Opt_i_version: 1679 case Opt_i_version:
1680 sb->s_flags |= MS_I_VERSION; 1680 sb->s_flags |= SB_I_VERSION;
1681 return 1; 1681 return 1;
1682 case Opt_lazytime: 1682 case Opt_lazytime:
1683 sb->s_flags |= MS_LAZYTIME; 1683 sb->s_flags |= MS_LAZYTIME;
@@ -2060,7 +2060,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
2060 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time); 2060 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
2061 if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) 2061 if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
2062 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time); 2062 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
2063 if (sb->s_flags & MS_I_VERSION) 2063 if (sb->s_flags & SB_I_VERSION)
2064 SEQ_OPTS_PUTS("i_version"); 2064 SEQ_OPTS_PUTS("i_version");
2065 if (nodefs || sbi->s_stripe) 2065 if (nodefs || sbi->s_stripe)
2066 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); 2066 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index b5ab06fabc60..0438d4cd91ef 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -331,6 +331,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
331 rcu_read_lock(); 331 rcu_read_lock();
332 332
333 confkey = user_key_payload_rcu(key); 333 confkey = user_key_payload_rcu(key);
334 if (!confkey) {
335 /* key was revoked */
336 rcu_read_unlock();
337 key_put(key);
338 goto no_config;
339 }
340
334 buf = confkey->data; 341 buf = confkey->data;
335 342
336 for (len = confkey->datalen - 1; len >= 0; len--) { 343 for (len = confkey->datalen - 1; len >= 0; len--) {
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 65c88379a3a1..94a745acaef8 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1059,7 +1059,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1059 if (sb->s_flags & MS_MANDLOCK) 1059 if (sb->s_flags & MS_MANDLOCK)
1060 goto err; 1060 goto err;
1061 1061
1062 sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION); 1062 sb->s_flags &= ~(MS_NOSEC | SB_I_VERSION);
1063 1063
1064 if (!parse_fuse_opt(data, &d, is_bdev)) 1064 if (!parse_fuse_opt(data, &d, is_bdev))
1065 goto err; 1065 goto err;
diff --git a/fs/iomap.c b/fs/iomap.c
index be61cf742b5e..d4801f8dd4fd 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -714,23 +714,9 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
714{ 714{
715 struct kiocb *iocb = dio->iocb; 715 struct kiocb *iocb = dio->iocb;
716 struct inode *inode = file_inode(iocb->ki_filp); 716 struct inode *inode = file_inode(iocb->ki_filp);
717 loff_t offset = iocb->ki_pos;
717 ssize_t ret; 718 ssize_t ret;
718 719
719 /*
720 * Try again to invalidate clean pages which might have been cached by
721 * non-direct readahead, or faulted in by get_user_pages() if the source
722 * of the write was an mmap'ed region of the file we're writing. Either
723 * one is a pretty crazy thing to do, so we don't support it 100%. If
724 * this invalidation fails, tough, the write still worked...
725 */
726 if (!dio->error &&
727 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
728 ret = invalidate_inode_pages2_range(inode->i_mapping,
729 iocb->ki_pos >> PAGE_SHIFT,
730 (iocb->ki_pos + dio->size - 1) >> PAGE_SHIFT);
731 WARN_ON_ONCE(ret);
732 }
733
734 if (dio->end_io) { 720 if (dio->end_io) {
735 ret = dio->end_io(iocb, 721 ret = dio->end_io(iocb,
736 dio->error ? dio->error : dio->size, 722 dio->error ? dio->error : dio->size,
@@ -742,12 +728,33 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
742 if (likely(!ret)) { 728 if (likely(!ret)) {
743 ret = dio->size; 729 ret = dio->size;
744 /* check for short read */ 730 /* check for short read */
745 if (iocb->ki_pos + ret > dio->i_size && 731 if (offset + ret > dio->i_size &&
746 !(dio->flags & IOMAP_DIO_WRITE)) 732 !(dio->flags & IOMAP_DIO_WRITE))
747 ret = dio->i_size - iocb->ki_pos; 733 ret = dio->i_size - offset;
748 iocb->ki_pos += ret; 734 iocb->ki_pos += ret;
749 } 735 }
750 736
737 /*
738 * Try again to invalidate clean pages which might have been cached by
739 * non-direct readahead, or faulted in by get_user_pages() if the source
740 * of the write was an mmap'ed region of the file we're writing. Either
741 * one is a pretty crazy thing to do, so we don't support it 100%. If
742 * this invalidation fails, tough, the write still worked...
743 *
744 * And this page cache invalidation has to be after dio->end_io(), as
745 * some filesystems convert unwritten extents to real allocations in
746 * end_io() when necessary, otherwise a racing buffer read would cache
747 * zeros from unwritten extents.
748 */
749 if (!dio->error &&
750 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
751 int err;
752 err = invalidate_inode_pages2_range(inode->i_mapping,
753 offset >> PAGE_SHIFT,
754 (offset + dio->size - 1) >> PAGE_SHIFT);
755 WARN_ON_ONCE(err);
756 }
757
751 inode_dio_end(file_inode(iocb->ki_filp)); 758 inode_dio_end(file_inode(iocb->ki_filp));
752 kfree(dio); 759 kfree(dio);
753 760
diff --git a/fs/namespace.c b/fs/namespace.c
index 3b601f115b6c..d18deb4c410b 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2825,7 +2825,8 @@ long do_mount(const char *dev_name, const char __user *dir_name,
2825 SB_MANDLOCK | 2825 SB_MANDLOCK |
2826 SB_DIRSYNC | 2826 SB_DIRSYNC |
2827 SB_SILENT | 2827 SB_SILENT |
2828 SB_POSIXACL); 2828 SB_POSIXACL |
2829 SB_I_VERSION);
2829 2830
2830 if (flags & MS_REMOUNT) 2831 if (flags & MS_REMOUNT)
2831 retval = do_remount(&path, flags, sb_flags, mnt_flags, 2832 retval = do_remount(&path, flags, sb_flags, mnt_flags,
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index def32fa1c225..89263797cf32 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3852,6 +3852,17 @@ xfs_trim_extent(
3852 } 3852 }
3853} 3853}
3854 3854
3855/* trim extent to within eof */
3856void
3857xfs_trim_extent_eof(
3858 struct xfs_bmbt_irec *irec,
3859 struct xfs_inode *ip)
3860
3861{
3862 xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
3863 i_size_read(VFS_I(ip))));
3864}
3865
3855/* 3866/*
3856 * Trim the returned map to the required bounds 3867 * Trim the returned map to the required bounds
3857 */ 3868 */
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 851982a5dfbc..502e0d8fb4ff 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -208,6 +208,7 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
208 208
209void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno, 209void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
210 xfs_filblks_t len); 210 xfs_filblks_t len);
211void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
211int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd); 212int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
212void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork); 213void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
213void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops, 214void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index f18e5932aec4..a3eeaba156c5 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -446,6 +446,19 @@ xfs_imap_valid(
446{ 446{
447 offset >>= inode->i_blkbits; 447 offset >>= inode->i_blkbits;
448 448
449 /*
450 * We have to make sure the cached mapping is within EOF to protect
451 * against eofblocks trimming on file release leaving us with a stale
452 * mapping. Otherwise, a page for a subsequent file extending buffered
453 * write could get picked up by this writeback cycle and written to the
454 * wrong blocks.
455 *
456 * Note that what we really want here is a generic mapping invalidation
457 * mechanism to protect us from arbitrary extent modifying contexts, not
458 * just eofblocks.
459 */
460 xfs_trim_extent_eof(imap, XFS_I(inode));
461
449 return offset >= imap->br_startoff && 462 return offset >= imap->br_startoff &&
450 offset < imap->br_startoff + imap->br_blockcount; 463 offset < imap->br_startoff + imap->br_blockcount;
451} 464}
@@ -735,6 +748,14 @@ xfs_vm_invalidatepage(
735{ 748{
736 trace_xfs_invalidatepage(page->mapping->host, page, offset, 749 trace_xfs_invalidatepage(page->mapping->host, page, offset,
737 length); 750 length);
751
752 /*
753 * If we are invalidating the entire page, clear the dirty state from it
754 * so that we can check for attempts to release dirty cached pages in
755 * xfs_vm_releasepage().
756 */
757 if (offset == 0 && length >= PAGE_SIZE)
758 cancel_dirty_page(page);
738 block_invalidatepage(page, offset, length); 759 block_invalidatepage(page, offset, length);
739} 760}
740 761
@@ -1190,25 +1211,27 @@ xfs_vm_releasepage(
1190 * mm accommodates an old ext3 case where clean pages might not have had 1211 * mm accommodates an old ext3 case where clean pages might not have had
1191 * the dirty bit cleared. Thus, it can send actual dirty pages to 1212 * the dirty bit cleared. Thus, it can send actual dirty pages to
1192 * ->releasepage() via shrink_active_list(). Conversely, 1213 * ->releasepage() via shrink_active_list(). Conversely,
1193 * block_invalidatepage() can send pages that are still marked dirty 1214 * block_invalidatepage() can send pages that are still marked dirty but
1194 * but otherwise have invalidated buffers. 1215 * otherwise have invalidated buffers.
1195 * 1216 *
1196 * We want to release the latter to avoid unnecessary buildup of the 1217 * We want to release the latter to avoid unnecessary buildup of the
1197 * LRU, skip the former and warn if we've left any lingering 1218 * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
1198 * delalloc/unwritten buffers on clean pages. Skip pages with delalloc 1219 * that are entirely invalidated and need to be released. Hence the
1199 * or unwritten buffers and warn if the page is not dirty. Otherwise 1220 * only time we should get dirty pages here is through
1200 * try to release the buffers. 1221 * shrink_active_list() and so we can simply skip those now.
1222 *
1223 * warn if we've left any lingering delalloc/unwritten buffers on clean
1224 * or invalidated pages we are about to release.
1201 */ 1225 */
1226 if (PageDirty(page))
1227 return 0;
1228
1202 xfs_count_page_state(page, &delalloc, &unwritten); 1229 xfs_count_page_state(page, &delalloc, &unwritten);
1203 1230
1204 if (delalloc) { 1231 if (WARN_ON_ONCE(delalloc))
1205 WARN_ON_ONCE(!PageDirty(page));
1206 return 0; 1232 return 0;
1207 } 1233 if (WARN_ON_ONCE(unwritten))
1208 if (unwritten) {
1209 WARN_ON_ONCE(!PageDirty(page));
1210 return 0; 1234 return 0;
1211 }
1212 1235
1213 return try_to_free_buffers(page); 1236 return try_to_free_buffers(page);
1214} 1237}
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 560e0b40ac1b..43cfc07996a4 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -367,29 +367,6 @@ xfs_getfsmap_datadev_helper(
367 return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr); 367 return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
368} 368}
369 369
370/* Transform a rtbitmap "record" into a fsmap */
371STATIC int
372xfs_getfsmap_rtdev_rtbitmap_helper(
373 struct xfs_trans *tp,
374 struct xfs_rtalloc_rec *rec,
375 void *priv)
376{
377 struct xfs_mount *mp = tp->t_mountp;
378 struct xfs_getfsmap_info *info = priv;
379 struct xfs_rmap_irec irec;
380 xfs_daddr_t rec_daddr;
381
382 rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
383
384 irec.rm_startblock = rec->ar_startblock;
385 irec.rm_blockcount = rec->ar_blockcount;
386 irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
387 irec.rm_offset = 0;
388 irec.rm_flags = 0;
389
390 return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
391}
392
393/* Transform a bnobt irec into a fsmap */ 370/* Transform a bnobt irec into a fsmap */
394STATIC int 371STATIC int
395xfs_getfsmap_datadev_bnobt_helper( 372xfs_getfsmap_datadev_bnobt_helper(
@@ -475,6 +452,30 @@ xfs_getfsmap_logdev(
475 return xfs_getfsmap_helper(tp, info, &rmap, 0); 452 return xfs_getfsmap_helper(tp, info, &rmap, 0);
476} 453}
477 454
455#ifdef CONFIG_XFS_RT
456/* Transform a rtbitmap "record" into a fsmap */
457STATIC int
458xfs_getfsmap_rtdev_rtbitmap_helper(
459 struct xfs_trans *tp,
460 struct xfs_rtalloc_rec *rec,
461 void *priv)
462{
463 struct xfs_mount *mp = tp->t_mountp;
464 struct xfs_getfsmap_info *info = priv;
465 struct xfs_rmap_irec irec;
466 xfs_daddr_t rec_daddr;
467
468 rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
469
470 irec.rm_startblock = rec->ar_startblock;
471 irec.rm_blockcount = rec->ar_blockcount;
472 irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
473 irec.rm_offset = 0;
474 irec.rm_flags = 0;
475
476 return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
477}
478
478/* Execute a getfsmap query against the realtime device. */ 479/* Execute a getfsmap query against the realtime device. */
479STATIC int 480STATIC int
480__xfs_getfsmap_rtdev( 481__xfs_getfsmap_rtdev(
@@ -521,7 +522,6 @@ __xfs_getfsmap_rtdev(
521 return query_fn(tp, info); 522 return query_fn(tp, info);
522} 523}
523 524
524#ifdef CONFIG_XFS_RT
525/* Actually query the realtime bitmap. */ 525/* Actually query the realtime bitmap. */
526STATIC int 526STATIC int
527xfs_getfsmap_rtdev_rtbitmap_query( 527xfs_getfsmap_rtdev_rtbitmap_query(
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 584cf2d573ba..f663022353c0 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1637,7 +1637,7 @@ xfs_fs_fill_super(
1637 1637
1638 /* version 5 superblocks support inode version counters. */ 1638 /* version 5 superblocks support inode version counters. */
1639 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5) 1639 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1640 sb->s_flags |= MS_I_VERSION; 1640 sb->s_flags |= SB_I_VERSION;
1641 1641
1642 if (mp->m_flags & XFS_MOUNT_DAX) { 1642 if (mp->m_flags & XFS_MOUNT_DAX) {
1643 xfs_warn(mp, 1643 xfs_warn(mp,
diff --git a/include/linux/filter.h b/include/linux/filter.h
index d29e58fde364..818a0b26249e 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -728,7 +728,7 @@ void xdp_do_flush_map(void);
728void bpf_warn_invalid_xdp_action(u32 act); 728void bpf_warn_invalid_xdp_action(u32 act);
729void bpf_warn_invalid_xdp_redirect(u32 ifindex); 729void bpf_warn_invalid_xdp_redirect(u32 ifindex);
730 730
731struct sock *do_sk_redirect_map(void); 731struct sock *do_sk_redirect_map(struct sk_buff *skb);
732 732
733#ifdef CONFIG_BPF_JIT 733#ifdef CONFIG_BPF_JIT
734extern int bpf_jit_enable; 734extern int bpf_jit_enable;
diff --git a/include/linux/input.h b/include/linux/input.h
index fb5e23c7ed98..7c7516eb7d76 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -234,6 +234,10 @@ struct input_dev {
234#error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match" 234#error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match"
235#endif 235#endif
236 236
237#if INPUT_PROP_MAX != INPUT_DEVICE_ID_PROP_MAX
238#error "INPUT_PROP_MAX and INPUT_DEVICE_ID_PROP_MAX do not match"
239#endif
240
237#define INPUT_DEVICE_ID_MATCH_DEVICE \ 241#define INPUT_DEVICE_ID_MATCH_DEVICE \
238 (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT) 242 (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT)
239#define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \ 243#define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \
@@ -469,6 +473,9 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke);
469int input_set_keycode(struct input_dev *dev, 473int input_set_keycode(struct input_dev *dev,
470 const struct input_keymap_entry *ke); 474 const struct input_keymap_entry *ke);
471 475
476bool input_match_device_id(const struct input_dev *dev,
477 const struct input_device_id *id);
478
472void input_enable_softrepeat(struct input_dev *dev, int delay, int period); 479void input_enable_softrepeat(struct input_dev *dev, int delay, int period);
473 480
474extern struct class input_class; 481extern struct class input_class;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d4728bf6a537..5ad10948ea95 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1009,7 +1009,7 @@ void irq_gc_mask_clr_bit(struct irq_data *d);
1009void irq_gc_unmask_enable_reg(struct irq_data *d); 1009void irq_gc_unmask_enable_reg(struct irq_data *d);
1010void irq_gc_ack_set_bit(struct irq_data *d); 1010void irq_gc_ack_set_bit(struct irq_data *d);
1011void irq_gc_ack_clr_bit(struct irq_data *d); 1011void irq_gc_ack_clr_bit(struct irq_data *d);
1012void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); 1012void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
1013void irq_gc_eoi(struct irq_data *d); 1013void irq_gc_eoi(struct irq_data *d);
1014int irq_gc_set_wake(struct irq_data *d, unsigned int on); 1014int irq_gc_set_wake(struct irq_data *d, unsigned int on);
1015 1015
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 1ea576c8126f..14b74f22d43c 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -372,6 +372,8 @@
372#define GITS_BASER_ENTRY_SIZE_SHIFT (48) 372#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
373#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) 373#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
374#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) 374#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48)
375#define GITS_BASER_PHYS_52_to_48(phys) \
376 (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
375#define GITS_BASER_SHAREABILITY_SHIFT (10) 377#define GITS_BASER_SHAREABILITY_SHIFT (10)
376#define GITS_BASER_InnerShareable \ 378#define GITS_BASER_InnerShareable \
377 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) 379 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
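
A worked example of the new macro may help; the snippet below is illustrative only (the sample address is invented, not part of this patch). GITS_BASER_PHYS_52_to_48() keeps bits [47:16] of a 52-bit physical address in place and folds bits [51:48] down into bits [15:12], which is where a 64K-page GITS_BASER register is assumed to carry the upper address bits.

	#include <linux/irqchip/arm-gic-v3.h>	/* for GITS_BASER_PHYS_52_to_48() */

	static u64 example_fold_52bit_addr(void)
	{
		u64 phys = 0x000abcdef0120000ULL;	/* bits [51:48] = 0xa, made up */

		/* Keeps bits [47:16], moves bits [51:48] down to [15:12]:
		 * result is 0x0000bcdef012a000.
		 */
		return GITS_BASER_PHYS_52_to_48(phys);
	}
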
diff --git a/include/linux/key.h b/include/linux/key.h
index e315e16b6ff8..8a15cabe928d 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -138,6 +138,11 @@ struct key_restriction {
138 struct key_type *keytype; 138 struct key_type *keytype;
139}; 139};
140 140
141enum key_state {
142 KEY_IS_UNINSTANTIATED,
143 KEY_IS_POSITIVE, /* Positively instantiated */
144};
145
141/*****************************************************************************/ 146/*****************************************************************************/
142/* 147/*
143 * authentication token / access credential / keyring 148 * authentication token / access credential / keyring
@@ -169,6 +174,7 @@ struct key {
169 * - may not match RCU dereferenced payload 174 * - may not match RCU dereferenced payload
170 * - payload should contain own length 175 * - payload should contain own length
171 */ 176 */
177 short state; /* Key state (+) or rejection error (-) */
172 178
173#ifdef KEY_DEBUGGING 179#ifdef KEY_DEBUGGING
174 unsigned magic; 180 unsigned magic;
@@ -176,18 +182,16 @@ struct key {
176#endif 182#endif
177 183
178 unsigned long flags; /* status flags (change with bitops) */ 184 unsigned long flags; /* status flags (change with bitops) */
179#define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */ 185#define KEY_FLAG_DEAD 0 /* set if key type has been deleted */
180#define KEY_FLAG_DEAD 1 /* set if key type has been deleted */ 186#define KEY_FLAG_REVOKED 1 /* set if key had been revoked */
181#define KEY_FLAG_REVOKED 2 /* set if key had been revoked */ 187#define KEY_FLAG_IN_QUOTA 2 /* set if key consumes quota */
182#define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */ 188#define KEY_FLAG_USER_CONSTRUCT 3 /* set if key is being constructed in userspace */
183#define KEY_FLAG_USER_CONSTRUCT 4 /* set if key is being constructed in userspace */ 189#define KEY_FLAG_ROOT_CAN_CLEAR 4 /* set if key can be cleared by root without permission */
184#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */ 190#define KEY_FLAG_INVALIDATED 5 /* set if key has been invalidated */
185#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */ 191#define KEY_FLAG_BUILTIN 6 /* set if key is built in to the kernel */
186#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */ 192#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */
187#define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */ 193#define KEY_FLAG_KEEP 8 /* set if key should not be removed */
188#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */ 194#define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */
189#define KEY_FLAG_KEEP 10 /* set if key should not be removed */
190#define KEY_FLAG_UID_KEYRING 11 /* set if key is a user or user session keyring */
191 195
192 /* the key type and key description string 196 /* the key type and key description string
193 * - the desc is used to match a key against search criteria 197 * - the desc is used to match a key against search criteria
@@ -213,7 +217,6 @@ struct key {
213 struct list_head name_link; 217 struct list_head name_link;
214 struct assoc_array keys; 218 struct assoc_array keys;
215 }; 219 };
216 int reject_error;
217 }; 220 };
218 221
219 /* This is set on a keyring to restrict the addition of a link to a key 222 /* This is set on a keyring to restrict the addition of a link to a key
@@ -353,17 +356,27 @@ extern void key_set_timeout(struct key *, unsigned);
353#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */ 356#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */
354#define KEY_NEED_ALL 0x3f /* All the above permissions */ 357#define KEY_NEED_ALL 0x3f /* All the above permissions */
355 358
359static inline short key_read_state(const struct key *key)
360{
361 /* Barrier versus mark_key_instantiated(). */
362 return smp_load_acquire(&key->state);
363}
364
356/** 365/**
357 * key_is_instantiated - Determine if a key has been positively instantiated 366 * key_is_positive - Determine if a key has been positively instantiated
358 * @key: The key to check. 367 * @key: The key to check.
359 * 368 *
360 * Return true if the specified key has been positively instantiated, false 369 * Return true if the specified key has been positively instantiated, false
361 * otherwise. 370 * otherwise.
362 */ 371 */
363static inline bool key_is_instantiated(const struct key *key) 372static inline bool key_is_positive(const struct key *key)
373{
374 return key_read_state(key) == KEY_IS_POSITIVE;
375}
376
377static inline bool key_is_negative(const struct key *key)
364{ 378{
365 return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) && 379 return key_read_state(key) < 0;
366 !test_bit(KEY_FLAG_NEGATIVE, &key->flags);
367} 380}
368 381
369#define dereference_key_rcu(KEY) \ 382#define dereference_key_rcu(KEY) \
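
For readers following the key-state rework above, here is a minimal, hypothetical sketch (not taken from this patch) of how code that used to test KEY_FLAG_INSTANTIATED and KEY_FLAG_NEGATIVE would read the consolidated state field through the new accessors; a negatively instantiated key stores its rejection error as a negative state value.

	#include <linux/key.h>
	#include <linux/errno.h>

	/* Hypothetical helper, for illustration only. */
	static int example_check_key(const struct key *key)
	{
		short state = key_read_state(key);	/* smp_load_acquire() of key->state */

		if (state == KEY_IS_UNINSTANTIATED)
			return -EAGAIN;			/* construction still in progress */
		if (state < 0)
			return state;			/* negative key: cached rejection error */

		return 0;				/* key_is_positive(key) is true here */
	}
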
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 0d3f14fd2621..4773145246ed 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -31,8 +31,8 @@ struct mbus_dram_target_info
31 struct mbus_dram_window { 31 struct mbus_dram_window {
32 u8 cs_index; 32 u8 cs_index;
33 u8 mbus_attr; 33 u8 mbus_attr;
34 u32 base; 34 u64 base;
35 u32 size; 35 u64 size;
36 } cs[4]; 36 } cs[4];
37}; 37};
38 38
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 46f4ecf5479a..1861ea8dba77 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -445,6 +445,9 @@ struct mm_struct {
445 unsigned long flags; /* Must use atomic bitops to access the bits */ 445 unsigned long flags; /* Must use atomic bitops to access the bits */
446 446
447 struct core_state *core_state; /* coredumping support */ 447 struct core_state *core_state; /* coredumping support */
448#ifdef CONFIG_MEMBARRIER
449 atomic_t membarrier_state;
450#endif
448#ifdef CONFIG_AIO 451#ifdef CONFIG_AIO
449 spinlock_t ioctx_lock; 452 spinlock_t ioctx_lock;
450 struct kioctx_table __rcu *ioctx_table; 453 struct kioctx_table __rcu *ioctx_table;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 694cebb50f72..2657f9f51536 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -293,6 +293,7 @@ struct pcmcia_device_id {
293#define INPUT_DEVICE_ID_SND_MAX 0x07 293#define INPUT_DEVICE_ID_SND_MAX 0x07
294#define INPUT_DEVICE_ID_FF_MAX 0x7f 294#define INPUT_DEVICE_ID_FF_MAX 0x7f
295#define INPUT_DEVICE_ID_SW_MAX 0x0f 295#define INPUT_DEVICE_ID_SW_MAX 0x0f
296#define INPUT_DEVICE_ID_PROP_MAX 0x1f
296 297
297#define INPUT_DEVICE_ID_MATCH_BUS 1 298#define INPUT_DEVICE_ID_MATCH_BUS 1
298#define INPUT_DEVICE_ID_MATCH_VENDOR 2 299#define INPUT_DEVICE_ID_MATCH_VENDOR 2
@@ -308,6 +309,7 @@ struct pcmcia_device_id {
308#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400 309#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400
309#define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800 310#define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800
310#define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000 311#define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000
312#define INPUT_DEVICE_ID_MATCH_PROPBIT 0x2000
311 313
312struct input_device_id { 314struct input_device_id {
313 315
@@ -327,6 +329,7 @@ struct input_device_id {
327 kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1]; 329 kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1];
328 kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1]; 330 kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1];
329 kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1]; 331 kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1];
332 kernel_ulong_t propbit[INPUT_DEVICE_ID_PROP_MAX / BITS_PER_LONG + 1];
330 333
331 kernel_ulong_t driver_info; 334 kernel_ulong_t driver_info;
332}; 335};
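
The new propbit field and INPUT_DEVICE_ID_MATCH_PROPBIT flag pair with the input_match_device_id() helper declared in include/linux/input.h above. Purely as an illustration (the id entry and the property chosen are hypothetical, not part of this patch), a handler could restrict itself to direct-touch devices like this:

	#include <linux/input.h>
	#include <linux/mod_devicetable.h>
	#include <linux/bitops.h>

	/* Hypothetical id: match any device advertising INPUT_PROP_DIRECT. */
	static const struct input_device_id example_id = {
		.flags = INPUT_DEVICE_ID_MATCH_PROPBIT,
		.propbit = { [BIT_WORD(INPUT_PROP_DIRECT)] =
				BIT_MASK(INPUT_PROP_DIRECT) },
	};

	static bool example_is_direct_touch(struct input_dev *dev)
	{
		return input_match_device_id(dev, &example_id);
	}
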
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f535779d9dc1..2eaac7d75af4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3694,6 +3694,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
3694 unsigned char name_assign_type, 3694 unsigned char name_assign_type,
3695 void (*setup)(struct net_device *), 3695 void (*setup)(struct net_device *),
3696 unsigned int txqs, unsigned int rxqs); 3696 unsigned int txqs, unsigned int rxqs);
3697int dev_get_valid_name(struct net *net, struct net_device *dev,
3698 const char *name);
3699
3697#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ 3700#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
3698 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) 3701 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
3699 3702
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index b1fd8bf85fdc..2bea1d5e9930 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -276,7 +276,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
276#define list_entry_rcu(ptr, type, member) \ 276#define list_entry_rcu(ptr, type, member) \
277 container_of(lockless_dereference(ptr), type, member) 277 container_of(lockless_dereference(ptr), type, member)
278 278
279/** 279/*
280 * Where are list_empty_rcu() and list_first_entry_rcu()? 280 * Where are list_empty_rcu() and list_first_entry_rcu()?
281 * 281 *
282 * Implementing those functions following their counterparts list_empty() and 282 * Implementing those functions following their counterparts list_empty() and
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index de50d8a4cf41..1a9f70d44af9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -523,7 +523,7 @@ static inline void rcu_preempt_sleep_check(void) { }
523 * Return the value of the specified RCU-protected pointer, but omit 523 * Return the value of the specified RCU-protected pointer, but omit
524 * both the smp_read_barrier_depends() and the READ_ONCE(). This 524 * both the smp_read_barrier_depends() and the READ_ONCE(). This
525 * is useful in cases where update-side locks prevent the value of the 525 * is useful in cases where update-side locks prevent the value of the
526 * pointer from changing. Please note that this primitive does -not- 526 * pointer from changing. Please note that this primitive does *not*
527 * prevent the compiler from repeating this reference or combining it 527 * prevent the compiler from repeating this reference or combining it
528 * with other references, so it should not be used without protection 528 * with other references, so it should not be used without protection
529 * of appropriate locks. 529 * of appropriate locks.
@@ -568,7 +568,7 @@ static inline void rcu_preempt_sleep_check(void) { }
568 * is handed off from RCU to some other synchronization mechanism, for 568 * is handed off from RCU to some other synchronization mechanism, for
569 * example, reference counting or locking. In C11, it would map to 569 * example, reference counting or locking. In C11, it would map to
570 * kill_dependency(). It could be used as follows: 570 * kill_dependency(). It could be used as follows:
571 * 571 * ``
572 * rcu_read_lock(); 572 * rcu_read_lock();
573 * p = rcu_dereference(gp); 573 * p = rcu_dereference(gp);
574 * long_lived = is_long_lived(p); 574 * long_lived = is_long_lived(p);
@@ -579,6 +579,7 @@ static inline void rcu_preempt_sleep_check(void) { }
579 * p = rcu_pointer_handoff(p); 579 * p = rcu_pointer_handoff(p);
580 * } 580 * }
581 * rcu_read_unlock(); 581 * rcu_read_unlock();
582 *``
582 */ 583 */
583#define rcu_pointer_handoff(p) (p) 584#define rcu_pointer_handoff(p) (p)
584 585
@@ -778,18 +779,21 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
778 779
779/** 780/**
780 * RCU_INIT_POINTER() - initialize an RCU protected pointer 781 * RCU_INIT_POINTER() - initialize an RCU protected pointer
782 * @p: The pointer to be initialized.
783 * @v: The value to initialized the pointer to.
781 * 784 *
782 * Initialize an RCU-protected pointer in special cases where readers 785 * Initialize an RCU-protected pointer in special cases where readers
783 * do not need ordering constraints on the CPU or the compiler. These 786 * do not need ordering constraints on the CPU or the compiler. These
784 * special cases are: 787 * special cases are:
785 * 788 *
786 * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or- 789 * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
787 * 2. The caller has taken whatever steps are required to prevent 790 * 2. The caller has taken whatever steps are required to prevent
788 * RCU readers from concurrently accessing this pointer -or- 791 * RCU readers from concurrently accessing this pointer *or*
789 * 3. The referenced data structure has already been exposed to 792 * 3. The referenced data structure has already been exposed to
790 * readers either at compile time or via rcu_assign_pointer() -and- 793 * readers either at compile time or via rcu_assign_pointer() *and*
791 * a. You have not made -any- reader-visible changes to 794 *
792 * this structure since then -or- 795 * a. You have not made *any* reader-visible changes to
796 * this structure since then *or*
793 * b. It is OK for readers accessing this structure from its 797 * b. It is OK for readers accessing this structure from its
794 * new location to see the old state of the structure. (For 798 * new location to see the old state of the structure. (For
795 * example, the changes were to statistical counters or to 799 * example, the changes were to statistical counters or to
@@ -805,7 +809,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
805 * by a single external-to-structure RCU-protected pointer, then you may 809 * by a single external-to-structure RCU-protected pointer, then you may
806 * use RCU_INIT_POINTER() to initialize the internal RCU-protected 810 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
807 * pointers, but you must use rcu_assign_pointer() to initialize the 811 * pointers, but you must use rcu_assign_pointer() to initialize the
808 * external-to-structure pointer -after- you have completely initialized 812 * external-to-structure pointer *after* you have completely initialized
809 * the reader-accessible portions of the linked structure. 813 * the reader-accessible portions of the linked structure.
810 * 814 *
811 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no 815 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
@@ -819,6 +823,8 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
819 823
820/** 824/**
821 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer 825 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
826 * @p: The pointer to be initialized.
827 * @v: The value to initialized the pointer to.
822 * 828 *
823 * GCC-style initialization for an RCU-protected pointer in a structure field. 829 * GCC-style initialization for an RCU-protected pointer in a structure field.
824 */ 830 */
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index ae53e413fb13..ab9bf7b73954 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -211,4 +211,20 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
211 current->flags = (current->flags & ~PF_MEMALLOC) | flags; 211 current->flags = (current->flags & ~PF_MEMALLOC) | flags;
212} 212}
213 213
214#ifdef CONFIG_MEMBARRIER
215enum {
216 MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
217 MEMBARRIER_STATE_SWITCH_MM = (1U << 1),
218};
219
220static inline void membarrier_execve(struct task_struct *t)
221{
222 atomic_set(&t->mm->membarrier_state, 0);
223}
224#else
225static inline void membarrier_execve(struct task_struct *t)
226{
227}
228#endif
229
214#endif /* _LINUX_SCHED_MM_H */ 230#endif /* _LINUX_SCHED_MM_H */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 39af9bc0f653..62be8966e837 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -78,6 +78,7 @@ void synchronize_srcu(struct srcu_struct *sp);
78 78
79/** 79/**
80 * srcu_read_lock_held - might we be in SRCU read-side critical section? 80 * srcu_read_lock_held - might we be in SRCU read-side critical section?
81 * @sp: The srcu_struct structure to check
81 * 82 *
82 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU 83 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
83 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, 84 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index aa95053dfc78..425752f768d2 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -96,7 +96,7 @@ struct inet_request_sock {
96 kmemcheck_bitfield_end(flags); 96 kmemcheck_bitfield_end(flags);
97 u32 ir_mark; 97 u32 ir_mark;
98 union { 98 union {
99 struct ip_options_rcu *opt; 99 struct ip_options_rcu __rcu *ireq_opt;
100#if IS_ENABLED(CONFIG_IPV6) 100#if IS_ENABLED(CONFIG_IPV6)
101 struct { 101 struct {
102 struct ipv6_txoptions *ipv6_opt; 102 struct ipv6_txoptions *ipv6_opt;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 89974c5286d8..b1ef98ebce53 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -840,6 +840,11 @@ struct tcp_skb_cb {
840 struct inet6_skb_parm h6; 840 struct inet6_skb_parm h6;
841#endif 841#endif
842 } header; /* For incoming skbs */ 842 } header; /* For incoming skbs */
843 struct {
844 __u32 key;
845 __u32 flags;
846 struct bpf_map *map;
847 } bpf;
843 }; 848 };
844}; 849};
845 850
diff --git a/include/sound/control.h b/include/sound/control.h
index bd7246de58e7..a1f1152bc687 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -248,6 +248,9 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
248 void *private_data); 248 void *private_data);
249void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only); 249void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
250#define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true) 250#define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true)
251int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
252 int (*func)(struct snd_kcontrol *, void *),
253 void *arg);
251 254
252/* 255/*
253 * Helper functions for jack-detection controls 256 * Helper functions for jack-detection controls
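
The new snd_ctl_apply_vmaster_slaves() helper walks every slave control attached to a virtual master and applies a function to each. A minimal, hypothetical caller is sketched below (callback and helper names are invented for illustration):

	#include <sound/control.h>

	/* Hypothetical per-slave callback. */
	static int example_touch_slave(struct snd_kcontrol *slave, void *arg)
	{
		int *count = arg;

		(*count)++;		/* e.g. just count the attached slaves */
		return 0;		/* success */
	}

	static int example_count_slaves(struct snd_kcontrol *master)
	{
		int count = 0;
		int err = snd_ctl_apply_vmaster_slaves(master, example_touch_slave,
						       &count);

		return err ? err : count;
	}
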
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
index 6d47b3249d8a..4e01ad7ffe98 100644
--- a/include/uapi/linux/membarrier.h
+++ b/include/uapi/linux/membarrier.h
@@ -52,21 +52,30 @@
52 * (non-running threads are de facto in such a 52 * (non-running threads are de facto in such a
53 * state). This only covers threads from the 53 * state). This only covers threads from the
54 * same processes as the caller thread. This 54 * same processes as the caller thread. This
55 * command returns 0. The "expedited" commands 55 * command returns 0 on success. The
56 * complete faster than the non-expedited ones, 56 * "expedited" commands complete faster than
57 * they never block, but have the downside of 57 * the non-expedited ones, they never block,
58 * causing extra overhead. 58 * but have the downside of causing extra
59 * overhead. A process needs to register its
60 * intent to use the private expedited command
61 * prior to using it, otherwise this command
62 * returns -EPERM.
63 * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
64 * Register the process intent to use
65 * MEMBARRIER_CMD_PRIVATE_EXPEDITED. Always
66 * returns 0.
59 * 67 *
60 * Command to be passed to the membarrier system call. The commands need to 68 * Command to be passed to the membarrier system call. The commands need to
61 * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to 69 * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
62 * the value 0. 70 * the value 0.
63 */ 71 */
64enum membarrier_cmd { 72enum membarrier_cmd {
65 MEMBARRIER_CMD_QUERY = 0, 73 MEMBARRIER_CMD_QUERY = 0,
66 MEMBARRIER_CMD_SHARED = (1 << 0), 74 MEMBARRIER_CMD_SHARED = (1 << 0),
67 /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */ 75 /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
68 /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */ 76 /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
69 MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3), 77 MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
78 MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
70}; 79};
71 80
72#endif /* _UAPI_LINUX_MEMBARRIER_H */ 81#endif /* _UAPI_LINUX_MEMBARRIER_H */
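
From userspace, the registration command changes the calling sequence for the private expedited barrier. The sketch below is illustrative only (error handling trimmed, raw syscall(2) used since libc provides no wrapper): a process registers its intent once, after which MEMBARRIER_CMD_PRIVATE_EXPEDITED succeeds instead of returning -EPERM.

	#include <linux/membarrier.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdio.h>

	static int membarrier(int cmd, int flags)
	{
		return syscall(__NR_membarrier, cmd, flags);
	}

	int main(void)
	{
		/* One-time registration for this mm; documented to always return 0. */
		if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
			perror("register private expedited");

		/* Now permitted; would fail with EPERM without the registration above. */
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			perror("private expedited");

		return 0;
	}
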
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 98c0f00c3f5e..e2636737b69b 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -98,7 +98,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
98 array_size += (u64) attr->max_entries * elem_size * num_possible_cpus(); 98 array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
99 99
100 if (array_size >= U32_MAX - PAGE_SIZE || 100 if (array_size >= U32_MAX - PAGE_SIZE ||
101 elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) { 101 bpf_array_alloc_percpu(array)) {
102 bpf_map_area_free(array); 102 bpf_map_area_free(array);
103 return ERR_PTR(-ENOMEM); 103 return ERR_PTR(-ENOMEM);
104 } 104 }
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index e093d9a2c4dd..e745d6a88224 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -69,7 +69,7 @@ static LIST_HEAD(dev_map_list);
69 69
70static u64 dev_map_bitmap_size(const union bpf_attr *attr) 70static u64 dev_map_bitmap_size(const union bpf_attr *attr)
71{ 71{
72 return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long); 72 return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
73} 73}
74 74
75static struct bpf_map *dev_map_alloc(union bpf_attr *attr) 75static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
@@ -78,6 +78,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
78 int err = -EINVAL; 78 int err = -EINVAL;
79 u64 cost; 79 u64 cost;
80 80
81 if (!capable(CAP_NET_ADMIN))
82 return ERR_PTR(-EPERM);
83
81 /* check sanity of attributes */ 84 /* check sanity of attributes */
82 if (attr->max_entries == 0 || attr->key_size != 4 || 85 if (attr->max_entries == 0 || attr->key_size != 4 ||
83 attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) 86 attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
@@ -111,8 +114,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
111 err = -ENOMEM; 114 err = -ENOMEM;
112 115
113 /* A per cpu bitfield with a bit per possible net device */ 116 /* A per cpu bitfield with a bit per possible net device */
114 dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr), 117 dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
115 __alignof__(unsigned long)); 118 __alignof__(unsigned long),
119 GFP_KERNEL | __GFP_NOWARN);
116 if (!dtab->flush_needed) 120 if (!dtab->flush_needed)
117 goto free_dtab; 121 goto free_dtab;
118 122
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 431126f31ea3..6533f08d1238 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -317,10 +317,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
317 */ 317 */
318 goto free_htab; 318 goto free_htab;
319 319
320 if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
321 /* make sure the size for pcpu_alloc() is reasonable */
322 goto free_htab;
323
324 htab->elem_size = sizeof(struct htab_elem) + 320 htab->elem_size = sizeof(struct htab_elem) +
325 round_up(htab->map.key_size, 8); 321 round_up(htab->map.key_size, 8);
326 if (percpu) 322 if (percpu)
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 6424ce0e4969..2b6eb35ae5d3 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -39,6 +39,7 @@
39#include <linux/workqueue.h> 39#include <linux/workqueue.h>
40#include <linux/list.h> 40#include <linux/list.h>
41#include <net/strparser.h> 41#include <net/strparser.h>
42#include <net/tcp.h>
42 43
43struct bpf_stab { 44struct bpf_stab {
44 struct bpf_map map; 45 struct bpf_map map;
@@ -101,9 +102,16 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
101 return SK_DROP; 102 return SK_DROP;
102 103
103 skb_orphan(skb); 104 skb_orphan(skb);
105 /* We need to ensure that BPF metadata for maps is also cleared
106 * when we orphan the skb so that we don't have the possibility
107 * to reference a stale map.
108 */
109 TCP_SKB_CB(skb)->bpf.map = NULL;
104 skb->sk = psock->sock; 110 skb->sk = psock->sock;
105 bpf_compute_data_end(skb); 111 bpf_compute_data_end(skb);
112 preempt_disable();
106 rc = (*prog->bpf_func)(skb, prog->insnsi); 113 rc = (*prog->bpf_func)(skb, prog->insnsi);
114 preempt_enable();
107 skb->sk = NULL; 115 skb->sk = NULL;
108 116
109 return rc; 117 return rc;
@@ -114,17 +122,10 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
114 struct sock *sk; 122 struct sock *sk;
115 int rc; 123 int rc;
116 124
117 /* Because we use per cpu values to feed input from sock redirect
118 * in BPF program to do_sk_redirect_map() call we need to ensure we
119 * are not preempted. RCU read lock is not sufficient in this case
120 * with CONFIG_PREEMPT_RCU enabled so we must be explicit here.
121 */
122 preempt_disable();
123 rc = smap_verdict_func(psock, skb); 125 rc = smap_verdict_func(psock, skb);
124 switch (rc) { 126 switch (rc) {
125 case SK_REDIRECT: 127 case SK_REDIRECT:
126 sk = do_sk_redirect_map(); 128 sk = do_sk_redirect_map(skb);
127 preempt_enable();
128 if (likely(sk)) { 129 if (likely(sk)) {
129 struct smap_psock *peer = smap_psock_sk(sk); 130 struct smap_psock *peer = smap_psock_sk(sk);
130 131
@@ -141,8 +142,6 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
141 /* Fall through and free skb otherwise */ 142 /* Fall through and free skb otherwise */
142 case SK_DROP: 143 case SK_DROP:
143 default: 144 default:
144 if (rc != SK_REDIRECT)
145 preempt_enable();
146 kfree_skb(skb); 145 kfree_skb(skb);
147 } 146 }
148} 147}
@@ -487,6 +486,9 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
487 int err = -EINVAL; 486 int err = -EINVAL;
488 u64 cost; 487 u64 cost;
489 488
489 if (!capable(CAP_NET_ADMIN))
490 return ERR_PTR(-EPERM);
491
490 /* check sanity of attributes */ 492 /* check sanity of attributes */
491 if (attr->max_entries == 0 || attr->key_size != 4 || 493 if (attr->max_entries == 0 || attr->key_size != 4 ||
492 attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) 494 attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
@@ -840,6 +842,12 @@ static int sock_map_update_elem(struct bpf_map *map,
840 return -EINVAL; 842 return -EINVAL;
841 } 843 }
842 844
845 if (skops.sk->sk_type != SOCK_STREAM ||
846 skops.sk->sk_protocol != IPPROTO_TCP) {
847 fput(socket->file);
848 return -EOPNOTSUPP;
849 }
850
843 err = sock_map_ctx_update_elem(&skops, map, key, flags); 851 err = sock_map_ctx_update_elem(&skops, map, key, flags);
844 fput(socket->file); 852 fput(socket->file);
845 return err; 853 return err;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8b8d6ba39e23..c48ca2a34b5e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1116,7 +1116,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
1116 /* ctx accesses must be at a fixed offset, so that we can 1116 /* ctx accesses must be at a fixed offset, so that we can
1117 * determine what type of data were returned. 1117 * determine what type of data were returned.
1118 */ 1118 */
1119 if (!tnum_is_const(reg->var_off)) { 1119 if (reg->off) {
1120 verbose("dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
1121 regno, reg->off, off - reg->off);
1122 return -EACCES;
1123 }
1124 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
1120 char tn_buf[48]; 1125 char tn_buf[48];
1121 1126
1122 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 1127 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
@@ -1124,7 +1129,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
1124 tn_buf, off, size); 1129 tn_buf, off, size);
1125 return -EACCES; 1130 return -EACCES;
1126 } 1131 }
1127 off += reg->var_off.value;
1128 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type); 1132 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1129 if (!err && t == BPF_READ && value_regno >= 0) { 1133 if (!err && t == BPF_READ && value_regno >= 0) {
1130 /* ctx access returns either a scalar, or a 1134 /* ctx access returns either a scalar, or a
@@ -2426,12 +2430,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2426} 2430}
2427 2431
2428static void find_good_pkt_pointers(struct bpf_verifier_state *state, 2432static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2429 struct bpf_reg_state *dst_reg) 2433 struct bpf_reg_state *dst_reg,
2434 bool range_right_open)
2430{ 2435{
2431 struct bpf_reg_state *regs = state->regs, *reg; 2436 struct bpf_reg_state *regs = state->regs, *reg;
2437 u16 new_range;
2432 int i; 2438 int i;
2433 2439
2434 if (dst_reg->off < 0) 2440 if (dst_reg->off < 0 ||
2441 (dst_reg->off == 0 && range_right_open))
2435 /* This doesn't give us any range */ 2442 /* This doesn't give us any range */
2436 return; 2443 return;
2437 2444
@@ -2442,9 +2449,13 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2442 */ 2449 */
2443 return; 2450 return;
2444 2451
2445 /* LLVM can generate four kind of checks: 2452 new_range = dst_reg->off;
2453 if (range_right_open)
2454 new_range--;
2455
2456 /* Examples for register markings:
2446 * 2457 *
2447 * Type 1/2: 2458 * pkt_data in dst register:
2448 * 2459 *
2449 * r2 = r3; 2460 * r2 = r3;
2450 * r2 += 8; 2461 * r2 += 8;
@@ -2461,7 +2472,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2461 * r2=pkt(id=n,off=8,r=0) 2472 * r2=pkt(id=n,off=8,r=0)
2462 * r3=pkt(id=n,off=0,r=0) 2473 * r3=pkt(id=n,off=0,r=0)
2463 * 2474 *
2464 * Type 3/4: 2475 * pkt_data in src register:
2465 * 2476 *
2466 * r2 = r3; 2477 * r2 = r3;
2467 * r2 += 8; 2478 * r2 += 8;
@@ -2479,7 +2490,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2479 * r3=pkt(id=n,off=0,r=0) 2490 * r3=pkt(id=n,off=0,r=0)
2480 * 2491 *
2481 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 2492 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
2482 * so that range of bytes [r3, r3 + 8) is safe to access. 2493 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
2494 * and [r3, r3 + 8-1) respectively is safe to access depending on
2495 * the check.
2483 */ 2496 */
2484 2497
2485 /* If our ids match, then we must have the same max_value. And we 2498 /* If our ids match, then we must have the same max_value. And we
@@ -2490,14 +2503,14 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2490 for (i = 0; i < MAX_BPF_REG; i++) 2503 for (i = 0; i < MAX_BPF_REG; i++)
2491 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) 2504 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
2492 /* keep the maximum range already checked */ 2505 /* keep the maximum range already checked */
2493 regs[i].range = max_t(u16, regs[i].range, dst_reg->off); 2506 regs[i].range = max(regs[i].range, new_range);
2494 2507
2495 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 2508 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
2496 if (state->stack_slot_type[i] != STACK_SPILL) 2509 if (state->stack_slot_type[i] != STACK_SPILL)
2497 continue; 2510 continue;
2498 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 2511 reg = &state->spilled_regs[i / BPF_REG_SIZE];
2499 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) 2512 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
2500 reg->range = max_t(u16, reg->range, dst_reg->off); 2513 reg->range = max(reg->range, new_range);
2501 } 2514 }
2502} 2515}
2503 2516
@@ -2861,19 +2874,43 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
2861 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && 2874 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
2862 dst_reg->type == PTR_TO_PACKET && 2875 dst_reg->type == PTR_TO_PACKET &&
2863 regs[insn->src_reg].type == PTR_TO_PACKET_END) { 2876 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2864 find_good_pkt_pointers(this_branch, dst_reg); 2877 /* pkt_data' > pkt_end */
2878 find_good_pkt_pointers(this_branch, dst_reg, false);
2879 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
2880 dst_reg->type == PTR_TO_PACKET_END &&
2881 regs[insn->src_reg].type == PTR_TO_PACKET) {
2882 /* pkt_end > pkt_data' */
2883 find_good_pkt_pointers(other_branch, &regs[insn->src_reg], true);
2865 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT && 2884 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
2866 dst_reg->type == PTR_TO_PACKET && 2885 dst_reg->type == PTR_TO_PACKET &&
2867 regs[insn->src_reg].type == PTR_TO_PACKET_END) { 2886 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2868 find_good_pkt_pointers(other_branch, dst_reg); 2887 /* pkt_data' < pkt_end */
2888 find_good_pkt_pointers(other_branch, dst_reg, true);
2889 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
2890 dst_reg->type == PTR_TO_PACKET_END &&
2891 regs[insn->src_reg].type == PTR_TO_PACKET) {
2892 /* pkt_end < pkt_data' */
2893 find_good_pkt_pointers(this_branch, &regs[insn->src_reg], false);
2894 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
2895 dst_reg->type == PTR_TO_PACKET &&
2896 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2897 /* pkt_data' >= pkt_end */
2898 find_good_pkt_pointers(this_branch, dst_reg, true);
2869 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE && 2899 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
2870 dst_reg->type == PTR_TO_PACKET_END && 2900 dst_reg->type == PTR_TO_PACKET_END &&
2871 regs[insn->src_reg].type == PTR_TO_PACKET) { 2901 regs[insn->src_reg].type == PTR_TO_PACKET) {
2872 find_good_pkt_pointers(other_branch, &regs[insn->src_reg]); 2902 /* pkt_end >= pkt_data' */
2903 find_good_pkt_pointers(other_branch, &regs[insn->src_reg], false);
2904 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
2905 dst_reg->type == PTR_TO_PACKET &&
2906 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2907 /* pkt_data' <= pkt_end */
2908 find_good_pkt_pointers(other_branch, dst_reg, false);
2873 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE && 2909 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
2874 dst_reg->type == PTR_TO_PACKET_END && 2910 dst_reg->type == PTR_TO_PACKET_END &&
2875 regs[insn->src_reg].type == PTR_TO_PACKET) { 2911 regs[insn->src_reg].type == PTR_TO_PACKET) {
2876 find_good_pkt_pointers(this_branch, &regs[insn->src_reg]); 2912 /* pkt_end <= pkt_data' */
2913 find_good_pkt_pointers(this_branch, &regs[insn->src_reg], true);
2877 } else if (is_pointer_value(env, insn->dst_reg)) { 2914 } else if (is_pointer_value(env, insn->dst_reg)) {
2878 verbose("R%d pointer comparison prohibited\n", insn->dst_reg); 2915 verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
2879 return -EACCES; 2916 return -EACCES;
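
The practical effect of the extra JGT/JGE/JLT/JLE permutations is that packet bounds checks are recognized regardless of which side of the comparison the packet pointer sits on. Purely as an illustration (this program is not part of the patch, and the SEC() definition is a local stand-in), both spellings of the check below now let the verifier mark the ethhdr access as safe:

	#include <linux/bpf.h>
	#include <linux/if_ether.h>

	#define SEC(name) __attribute__((section(name), used))

	SEC("xdp")
	int xdp_bounds_example(struct xdp_md *ctx)
	{
		void *data     = (void *)(long)ctx->data;
		void *data_end = (void *)(long)ctx->data_end;
		struct ethhdr *eth = data;

		/* pkt_data' > pkt_end: range is established on the fall-through branch. */
		if (data + sizeof(*eth) > data_end)
			return XDP_DROP;

		/* The "reversed" spelling is handled the same way after this change:
		 *	if (data_end < data + sizeof(*eth))
		 *		return XDP_DROP;
		 */
		return eth->h_proto ? XDP_PASS : XDP_DROP;
	}

	char _license[] SEC("license") = "GPL";
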
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d851df22f5c5..04892a82f6ac 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -632,6 +632,11 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
632 __cpuhp_kick_ap(st); 632 __cpuhp_kick_ap(st);
633 } 633 }
634 634
635 /*
636 * Clean up the leftovers so the next hotplug operation wont use stale
637 * data.
638 */
639 st->node = st->last = NULL;
635 return ret; 640 return ret;
636} 641}
637 642
diff --git a/kernel/exit.c b/kernel/exit.c
index cf28528842bc..f6cad39f35df 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1611,7 +1611,7 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
1611 return err; 1611 return err;
1612 1612
1613 if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop))) 1613 if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
1614 goto Efault; 1614 return -EFAULT;
1615 1615
1616 user_access_begin(); 1616 user_access_begin();
1617 unsafe_put_user(signo, &infop->si_signo, Efault); 1617 unsafe_put_user(signo, &infop->si_signo, Efault);
@@ -1739,7 +1739,7 @@ COMPAT_SYSCALL_DEFINE5(waitid,
1739 return err; 1739 return err;
1740 1740
1741 if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop))) 1741 if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
1742 goto Efault; 1742 return -EFAULT;
1743 1743
1744 user_access_begin(); 1744 user_access_begin();
1745 unsafe_put_user(signo, &infop->si_signo, Efault); 1745 unsafe_put_user(signo, &infop->si_signo, Efault);
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index 5270a54b9fa4..c26c5bb6b491 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -135,17 +135,26 @@ void irq_gc_ack_clr_bit(struct irq_data *d)
135} 135}
136 136
137/** 137/**
138 * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt 138 * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
139 * @d: irq_data 139 * @d: irq_data
140 *
141 * This generic implementation of the irq_mask_ack method is for chips
142 * with separate enable/disable registers instead of a single mask
143 * register and where a pending interrupt is acknowledged by setting a
144 * bit.
145 *
146 * Note: This is the only permutation currently used. Similar generic
147 * functions should be added here if other permutations are required.
140 */ 148 */
141void irq_gc_mask_disable_reg_and_ack(struct irq_data *d) 149void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
142{ 150{
143 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 151 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
144 struct irq_chip_type *ct = irq_data_get_chip_type(d); 152 struct irq_chip_type *ct = irq_data_get_chip_type(d);
145 u32 mask = d->mask; 153 u32 mask = d->mask;
146 154
147 irq_gc_lock(gc); 155 irq_gc_lock(gc);
148 irq_reg_writel(gc, mask, ct->regs.mask); 156 irq_reg_writel(gc, mask, ct->regs.disable);
157 *ct->mask_cache &= ~mask;
149 irq_reg_writel(gc, mask, ct->regs.ack); 158 irq_reg_writel(gc, mask, ct->regs.ack);
150 irq_gc_unlock(gc); 159 irq_gc_unlock(gc);
151} 160}
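
For context, the renamed helper is meant to be wired up as the irq_mask_ack method of a generic chip whose enable/disable bits live in separate registers and whose interrupts are acknowledged by setting a bit. The fragment below is a hypothetical driver-init sketch (chip name and register offsets invented), not code from this series:

	#include <linux/irq.h>

	static void example_gc_setup(unsigned int irq_base, void __iomem *reg_base)
	{
		struct irq_chip_generic *gc;
		struct irq_chip_type *ct;

		gc = irq_alloc_generic_chip("example", 1, irq_base, reg_base,
					    handle_level_irq);
		if (!gc)
			return;

		ct = gc->chip_types;
		ct->regs.enable  = 0x00;	/* made-up offsets */
		ct->regs.disable = 0x04;
		ct->regs.ack     = 0x08;

		ct->chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
		ct->chip.irq_unmask   = irq_gc_unmask_enable_reg;
		ct->chip.irq_ack      = irq_gc_ack_set_bit;
	}
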
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 729a8706751d..6d5880089ff6 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -854,7 +854,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
854/** 854/**
855 * call_srcu() - Queue a callback for invocation after an SRCU grace period 855 * call_srcu() - Queue a callback for invocation after an SRCU grace period
856 * @sp: srcu_struct in queue the callback 856 * @sp: srcu_struct in queue the callback
857 * @head: structure to be used for queueing the SRCU callback. 857 * @rhp: structure to be used for queueing the SRCU callback.
858 * @func: function to be invoked after the SRCU grace period 858 * @func: function to be invoked after the SRCU grace period
859 * 859 *
860 * The callback function will be invoked some time after a full SRCU 860 * The callback function will be invoked some time after a full SRCU
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 50d1861f7759..3f943efcf61c 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -85,6 +85,9 @@ void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
85} 85}
86 86
87/** 87/**
88 * rcu_sync_enter_start - Force readers onto slow path for multiple updates
89 * @rsp: Pointer to rcu_sync structure to use for synchronization
90 *
88 * Must be called after rcu_sync_init() and before first use. 91 * Must be called after rcu_sync_init() and before first use.
89 * 92 *
90 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}() 93 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
@@ -142,7 +145,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
142 145
143/** 146/**
144 * rcu_sync_func() - Callback function managing reader access to fastpath 147 * rcu_sync_func() - Callback function managing reader access to fastpath
145 * @rsp: Pointer to rcu_sync structure to use for synchronization 148 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
146 * 149 *
147 * This function is passed to one of the call_rcu() functions by 150 * This function is passed to one of the call_rcu() functions by
148 * rcu_sync_exit(), so that it is invoked after a grace period following the 151 * rcu_sync_exit(), so that it is invoked after a grace period following the
@@ -158,9 +161,9 @@ void rcu_sync_enter(struct rcu_sync *rsp)
158 * rcu_sync_exit(). Otherwise, set all state back to idle so that readers 161 * rcu_sync_exit(). Otherwise, set all state back to idle so that readers
159 * can again use their fastpaths. 162 * can again use their fastpaths.
160 */ 163 */
161static void rcu_sync_func(struct rcu_head *rcu) 164static void rcu_sync_func(struct rcu_head *rhp)
162{ 165{
163 struct rcu_sync *rsp = container_of(rcu, struct rcu_sync, cb_head); 166 struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
164 unsigned long flags; 167 unsigned long flags;
165 168
166 BUG_ON(rsp->gp_state != GP_PASSED); 169 BUG_ON(rsp->gp_state != GP_PASSED);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b0ad62b0e7b8..3e3650e94ae6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3097,9 +3097,10 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
3097 * read-side critical sections have completed. call_rcu_sched() assumes 3097 * read-side critical sections have completed. call_rcu_sched() assumes
3098 * that the read-side critical sections end on enabling of preemption 3098 * that the read-side critical sections end on enabling of preemption
3099 * or on voluntary preemption. 3099 * or on voluntary preemption.
3100 * RCU read-side critical sections are delimited by : 3100 * RCU read-side critical sections are delimited by:
3101 * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR 3101 *
3102 * - anything that disables preemption. 3102 * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
3103 * - anything that disables preemption.
3103 * 3104 *
3104 * These may be nested. 3105 * These may be nested.
3105 * 3106 *
@@ -3124,11 +3125,12 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
3124 * handler. This means that read-side critical sections in process 3125 * handler. This means that read-side critical sections in process
3125 * context must not be interrupted by softirqs. This interface is to be 3126 * context must not be interrupted by softirqs. This interface is to be
3126 * used when most of the read-side critical sections are in softirq context. 3127 * used when most of the read-side critical sections are in softirq context.
3127 * RCU read-side critical sections are delimited by : 3128 * RCU read-side critical sections are delimited by:
3128 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context. 3129 *
3129 * OR 3130 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
3130 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. 3131 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
3131 * These may be nested. 3132 *
3133 * These may be nested.
3132 * 3134 *
3133 * See the description of call_rcu() for more detailed information on 3135 * See the description of call_rcu() for more detailed information on
3134 * memory ordering guarantees. 3136 * memory ordering guarantees.
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index a92fddc22747..dd7908743dab 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -18,6 +18,7 @@
18#include <linux/membarrier.h> 18#include <linux/membarrier.h>
19#include <linux/tick.h> 19#include <linux/tick.h>
20#include <linux/cpumask.h> 20#include <linux/cpumask.h>
21#include <linux/atomic.h>
21 22
22#include "sched.h" /* for cpu_rq(). */ 23#include "sched.h" /* for cpu_rq(). */
23 24
@@ -26,21 +27,26 @@
26 * except MEMBARRIER_CMD_QUERY. 27 * except MEMBARRIER_CMD_QUERY.
27 */ 28 */
28#define MEMBARRIER_CMD_BITMASK \ 29#define MEMBARRIER_CMD_BITMASK \
29 (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED) 30 (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED \
31 | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED)
30 32
31static void ipi_mb(void *info) 33static void ipi_mb(void *info)
32{ 34{
33 smp_mb(); /* IPIs should be serializing but paranoid. */ 35 smp_mb(); /* IPIs should be serializing but paranoid. */
34} 36}
35 37
36static void membarrier_private_expedited(void) 38static int membarrier_private_expedited(void)
37{ 39{
38 int cpu; 40 int cpu;
39 bool fallback = false; 41 bool fallback = false;
40 cpumask_var_t tmpmask; 42 cpumask_var_t tmpmask;
41 43
44 if (!(atomic_read(&current->mm->membarrier_state)
45 & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
46 return -EPERM;
47
42 if (num_online_cpus() == 1) 48 if (num_online_cpus() == 1)
43 return; 49 return 0;
44 50
45 /* 51 /*
46 * Matches memory barriers around rq->curr modification in 52 * Matches memory barriers around rq->curr modification in
@@ -94,6 +100,24 @@ static void membarrier_private_expedited(void)
94 * rq->curr modification in scheduler. 100 * rq->curr modification in scheduler.
95 */ 101 */
96 smp_mb(); /* exit from system call is not a mb */ 102 smp_mb(); /* exit from system call is not a mb */
103 return 0;
104}
105
106static void membarrier_register_private_expedited(void)
107{
108 struct task_struct *p = current;
109 struct mm_struct *mm = p->mm;
110
111 /*
112 * We need to consider threads belonging to different thread
113 * groups, which use the same mm. (CLONE_VM but not
114 * CLONE_THREAD).
115 */
116 if (atomic_read(&mm->membarrier_state)
117 & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)
118 return;
119 atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
120 &mm->membarrier_state);
97} 121}
98 122
99/** 123/**
@@ -144,7 +168,9 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
144 synchronize_sched(); 168 synchronize_sched();
145 return 0; 169 return 0;
146 case MEMBARRIER_CMD_PRIVATE_EXPEDITED: 170 case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
147 membarrier_private_expedited(); 171 return membarrier_private_expedited();
172 case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
173 membarrier_register_private_expedited();
148 return 0; 174 return 0;
149 default: 175 default:
150 return -EINVAL; 176 return -EINVAL;
diff --git a/lib/digsig.c b/lib/digsig.c
index 03d7c63837ae..6ba6fcd92dd1 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -87,6 +87,12 @@ static int digsig_verify_rsa(struct key *key,
87 down_read(&key->sem); 87 down_read(&key->sem);
88 ukp = user_key_payload_locked(key); 88 ukp = user_key_payload_locked(key);
89 89
90 if (!ukp) {
91 /* key was revoked before we acquired its semaphore */
92 err = -EKEYREVOKED;
93 goto err1;
94 }
95
90 if (ukp->datalen < sizeof(*pkh)) 96 if (ukp->datalen < sizeof(*pkh))
91 goto err1; 97 goto err1;
92 98
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index 5696a35184e4..69557c74ef9f 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -11,7 +11,7 @@
11 * ========================================================================== 11 * ==========================================================================
12 * 12 *
13 * A finite state machine consists of n states (struct ts_fsm_token) 13 * A finite state machine consists of n states (struct ts_fsm_token)
14 * representing the pattern as a finite automation. The data is read 14 * representing the pattern as a finite automaton. The data is read
15 * sequentially on an octet basis. Every state token specifies the number 15 * sequentially on an octet basis. Every state token specifies the number
16 * of recurrences and the type of value accepted which can be either a 16 * of recurrences and the type of value accepted which can be either a
17 * specific character or ctype based set of characters. The available 17 * specific character or ctype based set of characters. The available
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 632f783e65f1..ffbe66cbb0ed 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -27,7 +27,7 @@
27 * 27 *
28 * [1] Cormen, Leiserson, Rivest, Stein 28 * [1] Cormen, Leiserson, Rivest, Stein
29 * Introdcution to Algorithms, 2nd Edition, MIT Press 29 * Introdcution to Algorithms, 2nd Edition, MIT Press
30 * [2] See finite automation theory 30 * [2] See finite automaton theory
31 */ 31 */
32 32
33#include <linux/module.h> 33#include <linux/module.h>
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d5f3a62887cf..661f046ad318 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5828,21 +5828,6 @@ void mem_cgroup_sk_alloc(struct sock *sk)
5828 if (!mem_cgroup_sockets_enabled) 5828 if (!mem_cgroup_sockets_enabled)
5829 return; 5829 return;
5830 5830
5831 /*
5832 * Socket cloning can throw us here with sk_memcg already
5833 * filled. It won't however, necessarily happen from
5834 * process context. So the test for root memcg given
5835 * the current task's memcg won't help us in this case.
5836 *
5837 * Respecting the original socket's memcg is a better
5838 * decision in this case.
5839 */
5840 if (sk->sk_memcg) {
5841 BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5842 css_get(&sk->sk_memcg->css);
5843 return;
5844 }
5845
5846 rcu_read_lock(); 5831 rcu_read_lock();
5847 memcg = mem_cgroup_from_task(current); 5832 memcg = mem_cgroup_from_task(current);
5848 if (memcg == root_mem_cgroup) 5833 if (memcg == root_mem_cgroup)
diff --git a/mm/percpu.c b/mm/percpu.c
index aa121cef76de..a0e0c82c1e4c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1329,7 +1329,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1329 * @gfp: allocation flags 1329 * @gfp: allocation flags
1330 * 1330 *
1331 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't 1331 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
1332 * contain %GFP_KERNEL, the allocation is atomic. 1332 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1333 * then no warning will be triggered on invalid or failed allocation
1334 * requests.
1333 * 1335 *
1334 * RETURNS: 1336 * RETURNS:
1335 * Percpu pointer to the allocated area on success, NULL on failure. 1337 * Percpu pointer to the allocated area on success, NULL on failure.
@@ -1337,10 +1339,11 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1337static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, 1339static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1338 gfp_t gfp) 1340 gfp_t gfp)
1339{ 1341{
1342 bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1343 bool do_warn = !(gfp & __GFP_NOWARN);
1340 static int warn_limit = 10; 1344 static int warn_limit = 10;
1341 struct pcpu_chunk *chunk; 1345 struct pcpu_chunk *chunk;
1342 const char *err; 1346 const char *err;
1343 bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1344 int slot, off, cpu, ret; 1347 int slot, off, cpu, ret;
1345 unsigned long flags; 1348 unsigned long flags;
1346 void __percpu *ptr; 1349 void __percpu *ptr;
@@ -1361,7 +1364,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1361 1364
1362 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || 1365 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1363 !is_power_of_2(align))) { 1366 !is_power_of_2(align))) {
1364 WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n", 1367 WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1365 size, align); 1368 size, align);
1366 return NULL; 1369 return NULL;
1367 } 1370 }
@@ -1482,7 +1485,7 @@ fail_unlock:
1482fail: 1485fail:
1483 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); 1486 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1484 1487
1485 if (!is_atomic && warn_limit) { 1488 if (!is_atomic && do_warn && warn_limit) {
1486 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", 1489 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1487 size, align, is_atomic, err); 1490 size, align, is_atomic, err);
1488 dump_stack(); 1491 dump_stack();
@@ -1507,7 +1510,9 @@ fail:
1507 * 1510 *
1508 * Allocate zero-filled percpu area of @size bytes aligned at @align. If 1511 * Allocate zero-filled percpu area of @size bytes aligned at @align. If
1509 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can 1512 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1510 * be called from any context but is a lot more likely to fail. 1513 * be called from any context but is a lot more likely to fail. If @gfp
1514 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1515 * allocation requests.
1511 * 1516 *
1512 * RETURNS: 1517 * RETURNS:
1513 * Percpu pointer to the allocated area on success, NULL on failure. 1518 * Percpu pointer to the allocated area on success, NULL on failure.
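
This is the behaviour the bpf devmap change above relies on: passing __GFP_NOWARN keeps both the "illegal size" WARN and the allocation-failure message quiet, so an oversized request simply returns NULL and the caller can fall back gracefully. A minimal, hypothetical caller (function name invented) might look like:

	#include <linux/percpu.h>
	#include <linux/gfp.h>
	#include <linux/bitops.h>

	/* Illustration only: oversized or failed requests fail silently with
	 * __GFP_NOWARN instead of triggering a WARN splat.
	 */
	static unsigned long __percpu *example_alloc_bitmap(size_t nbits)
	{
		return __alloc_percpu_gfp(BITS_TO_LONGS(nbits) * sizeof(unsigned long),
					  __alignof__(unsigned long),
					  GFP_KERNEL | __GFP_NOWARN);
	}
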
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 3bc890716c89..de2152730809 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -573,7 +573,7 @@ static int br_process_vlan_info(struct net_bridge *br,
573 } 573 }
574 *vinfo_last = NULL; 574 *vinfo_last = NULL;
575 575
576 return 0; 576 return err;
577 } 577 }
578 578
579 return br_vlan_info(br, p, cmd, vinfo_curr); 579 return br_vlan_info(br, p, cmd, vinfo_curr);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 88edac0f3e36..ecd5c703d11e 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -78,7 +78,7 @@ MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
78static struct kmem_cache *rcv_cache __read_mostly; 78static struct kmem_cache *rcv_cache __read_mostly;
79 79
80/* table of registered CAN protocols */ 80/* table of registered CAN protocols */
81static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly; 81static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
82static DEFINE_MUTEX(proto_tab_lock); 82static DEFINE_MUTEX(proto_tab_lock);
83 83
84static atomic_t skbcounter = ATOMIC_INIT(0); 84static atomic_t skbcounter = ATOMIC_INIT(0);
@@ -788,7 +788,7 @@ int can_proto_register(const struct can_proto *cp)
788 788
789 mutex_lock(&proto_tab_lock); 789 mutex_lock(&proto_tab_lock);
790 790
791 if (proto_tab[proto]) { 791 if (rcu_access_pointer(proto_tab[proto])) {
792 pr_err("can: protocol %d already registered\n", proto); 792 pr_err("can: protocol %d already registered\n", proto);
793 err = -EBUSY; 793 err = -EBUSY;
794 } else 794 } else
@@ -812,7 +812,7 @@ void can_proto_unregister(const struct can_proto *cp)
812 int proto = cp->protocol; 812 int proto = cp->protocol;
813 813
814 mutex_lock(&proto_tab_lock); 814 mutex_lock(&proto_tab_lock);
815 BUG_ON(proto_tab[proto] != cp); 815 BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
816 RCU_INIT_POINTER(proto_tab[proto], NULL); 816 RCU_INIT_POINTER(proto_tab[proto], NULL);
817 mutex_unlock(&proto_tab_lock); 817 mutex_unlock(&proto_tab_lock);
818 818
@@ -875,9 +875,14 @@ static int can_pernet_init(struct net *net)
875 spin_lock_init(&net->can.can_rcvlists_lock); 875 spin_lock_init(&net->can.can_rcvlists_lock);
876 net->can.can_rx_alldev_list = 876 net->can.can_rx_alldev_list =
877 kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL); 877 kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
878 878 if (!net->can.can_rx_alldev_list)
879 goto out;
879 net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL); 880 net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL);
881 if (!net->can.can_stats)
882 goto out_free_alldev_list;
880 net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL); 883 net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL);
884 if (!net->can.can_pstats)
885 goto out_free_can_stats;
881 886
882 if (IS_ENABLED(CONFIG_PROC_FS)) { 887 if (IS_ENABLED(CONFIG_PROC_FS)) {
883 /* the statistics are updated every second (timer triggered) */ 888 /* the statistics are updated every second (timer triggered) */
@@ -892,6 +897,13 @@ static int can_pernet_init(struct net *net)
892 } 897 }
893 898
894 return 0; 899 return 0;
900
901 out_free_can_stats:
902 kfree(net->can.can_stats);
903 out_free_alldev_list:
904 kfree(net->can.can_rx_alldev_list);
905 out:
906 return -ENOMEM;
895} 907}
896 908
897static void can_pernet_exit(struct net *net) 909static void can_pernet_exit(struct net *net)
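
Annotation: the can_pernet_init() hunk adds the standard reverse-order unwind for its three kzalloc() calls. A generic sketch of that pattern, assuming illustrative type and label names rather than the actual CAN structures:

#include <linux/slab.h>
#include <linux/errno.h>

struct foo { int a; };
struct bar { int b; };

/* Allocate two objects; on failure, free whatever was already
 * allocated in reverse order via dedicated labels. */
static int example_init(struct foo **f, struct bar **b)
{
        *f = kzalloc(sizeof(**f), GFP_KERNEL);
        if (!*f)
                goto out;

        *b = kzalloc(sizeof(**b), GFP_KERNEL);
        if (!*b)
                goto out_free_foo;

        return 0;

out_free_foo:
        kfree(*f);
        *f = NULL;
out:
        return -ENOMEM;
}
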
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 47a8748d953a..13690334efa3 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1493,13 +1493,14 @@ static int bcm_init(struct sock *sk)
1493static int bcm_release(struct socket *sock) 1493static int bcm_release(struct socket *sock)
1494{ 1494{
1495 struct sock *sk = sock->sk; 1495 struct sock *sk = sock->sk;
1496 struct net *net = sock_net(sk); 1496 struct net *net;
1497 struct bcm_sock *bo; 1497 struct bcm_sock *bo;
1498 struct bcm_op *op, *next; 1498 struct bcm_op *op, *next;
1499 1499
1500 if (sk == NULL) 1500 if (!sk)
1501 return 0; 1501 return 0;
1502 1502
1503 net = sock_net(sk);
1503 bo = bcm_sk(sk); 1504 bo = bcm_sk(sk);
1504 1505
1505 /* remove bcm_ops, timer, rx_unregister(), etc. */ 1506 /* remove bcm_ops, timer, rx_unregister(), etc. */
diff --git a/net/core/dev.c b/net/core/dev.c
index 588b473194a8..11596a302a26 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1147,9 +1147,8 @@ static int dev_alloc_name_ns(struct net *net,
1147 return ret; 1147 return ret;
1148} 1148}
1149 1149
1150static int dev_get_valid_name(struct net *net, 1150int dev_get_valid_name(struct net *net, struct net_device *dev,
1151 struct net_device *dev, 1151 const char *name)
1152 const char *name)
1153{ 1152{
1154 BUG_ON(!net); 1153 BUG_ON(!net);
1155 1154
@@ -1165,6 +1164,7 @@ static int dev_get_valid_name(struct net *net,
1165 1164
1166 return 0; 1165 return 0;
1167} 1166}
1167EXPORT_SYMBOL(dev_get_valid_name);
1168 1168
1169/** 1169/**
1170 * dev_change_name - change name of a device 1170 * dev_change_name - change name of a device
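
Annotation: with dev_get_valid_name() made non-static and exported above, other networking code can validate or expand a "%d"-style interface name template against a namespace before registration. A hedged sketch of a caller (the wrapper and the "ex%d" default are illustrative; the real users of the export live elsewhere in the tree), expected to run under RTNL:

#include <linux/netdevice.h>

static int example_pick_name(struct net *net, struct net_device *dev,
                             const char *tmpl)
{
        int err;

        /* dev_get_valid_name() fills dev->name; a negative return is an error. */
        err = dev_get_valid_name(net, dev, tmpl && *tmpl ? tmpl : "ex%d");
        return err < 0 ? err : 0;
}
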
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 709a4e6fb447..f9c7a88cd981 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -303,7 +303,18 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
303 case SIOCSIFTXQLEN: 303 case SIOCSIFTXQLEN:
304 if (ifr->ifr_qlen < 0) 304 if (ifr->ifr_qlen < 0)
305 return -EINVAL; 305 return -EINVAL;
306 dev->tx_queue_len = ifr->ifr_qlen; 306 if (dev->tx_queue_len ^ ifr->ifr_qlen) {
307 unsigned int orig_len = dev->tx_queue_len;
308
309 dev->tx_queue_len = ifr->ifr_qlen;
310 err = call_netdevice_notifiers(
311 NETDEV_CHANGE_TX_QUEUE_LEN, dev);
312 err = notifier_to_errno(err);
313 if (err) {
314 dev->tx_queue_len = orig_len;
315 return err;
316 }
317 }
307 return 0; 318 return 0;
308 319
309 case SIOCSIFNAME: 320 case SIOCSIFNAME:
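
Annotation: the SIOCSIFTXQLEN hunk above only fires the new NETDEV_CHANGE_TX_QUEUE_LEN notifier when the length actually changes, and rolls the value back if a notifier returns an error. A hedged sketch of a consumer that could veto such a change; the handler and its zero-length policy are illustrative, and it would be hooked up with register_netdevice_notifier():

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        /* Veto a zero tx queue length; the ioctl path above restores
         * the previous value when it sees the error. */
        if (event == NETDEV_CHANGE_TX_QUEUE_LEN && dev->tx_queue_len == 0)
                return notifier_from_errno(-EINVAL);

        return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
        .notifier_call = example_netdev_event,
};
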
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 3228411ada0f..9a9a3d77e327 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -436,7 +436,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
436EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32); 436EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32);
437 437
438/* return false if legacy contained non-0 deprecated fields 438/* return false if legacy contained non-0 deprecated fields
439 * transceiver/maxtxpkt/maxrxpkt. rest of ksettings always updated 439 * maxtxpkt/maxrxpkt. rest of ksettings always updated
440 */ 440 */
441static bool 441static bool
442convert_legacy_settings_to_link_ksettings( 442convert_legacy_settings_to_link_ksettings(
@@ -451,8 +451,7 @@ convert_legacy_settings_to_link_ksettings(
451 * deprecated legacy fields, and they should not use 451 * deprecated legacy fields, and they should not use
452 * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS 452 * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS
453 */ 453 */
454 if (legacy_settings->transceiver || 454 if (legacy_settings->maxtxpkt ||
455 legacy_settings->maxtxpkt ||
456 legacy_settings->maxrxpkt) 455 legacy_settings->maxrxpkt)
457 retval = false; 456 retval = false;
458 457
diff --git a/net/core/filter.c b/net/core/filter.c
index 74b8c91fb5f4..aa0265997f93 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1839,31 +1839,31 @@ static const struct bpf_func_proto bpf_redirect_proto = {
1839 .arg2_type = ARG_ANYTHING, 1839 .arg2_type = ARG_ANYTHING,
1840}; 1840};
1841 1841
1842BPF_CALL_3(bpf_sk_redirect_map, struct bpf_map *, map, u32, key, u64, flags) 1842BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
1843 struct bpf_map *, map, u32, key, u64, flags)
1843{ 1844{
1844 struct redirect_info *ri = this_cpu_ptr(&redirect_info); 1845 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
1845 1846
1846 if (unlikely(flags)) 1847 if (unlikely(flags))
1847 return SK_ABORTED; 1848 return SK_ABORTED;
1848 1849
1849 ri->ifindex = key; 1850 tcb->bpf.key = key;
1850 ri->flags = flags; 1851 tcb->bpf.flags = flags;
1851 ri->map = map; 1852 tcb->bpf.map = map;
1852 1853
1853 return SK_REDIRECT; 1854 return SK_REDIRECT;
1854} 1855}
1855 1856
1856struct sock *do_sk_redirect_map(void) 1857struct sock *do_sk_redirect_map(struct sk_buff *skb)
1857{ 1858{
1858 struct redirect_info *ri = this_cpu_ptr(&redirect_info); 1859 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
1859 struct sock *sk = NULL; 1860 struct sock *sk = NULL;
1860 1861
1861 if (ri->map) { 1862 if (tcb->bpf.map) {
1862 sk = __sock_map_lookup_elem(ri->map, ri->ifindex); 1863 sk = __sock_map_lookup_elem(tcb->bpf.map, tcb->bpf.key);
1863 1864
1864 ri->ifindex = 0; 1865 tcb->bpf.key = 0;
1865 ri->map = NULL; 1866 tcb->bpf.map = NULL;
1866 /* we do not clear flags for future lookup */
1867 } 1867 }
1868 1868
1869 return sk; 1869 return sk;
@@ -1873,9 +1873,10 @@ static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
1873 .func = bpf_sk_redirect_map, 1873 .func = bpf_sk_redirect_map,
1874 .gpl_only = false, 1874 .gpl_only = false,
1875 .ret_type = RET_INTEGER, 1875 .ret_type = RET_INTEGER,
1876 .arg1_type = ARG_CONST_MAP_PTR, 1876 .arg1_type = ARG_PTR_TO_CTX,
1877 .arg2_type = ARG_ANYTHING, 1877 .arg2_type = ARG_CONST_MAP_PTR,
1878 .arg3_type = ARG_ANYTHING, 1878 .arg3_type = ARG_ANYTHING,
1879 .arg4_type = ARG_ANYTHING,
1879}; 1880};
1880 1881
1881BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) 1882BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
@@ -3683,7 +3684,6 @@ static bool sk_skb_is_valid_access(int off, int size,
3683{ 3684{
3684 if (type == BPF_WRITE) { 3685 if (type == BPF_WRITE) {
3685 switch (off) { 3686 switch (off) {
3686 case bpf_ctx_range(struct __sk_buff, mark):
3687 case bpf_ctx_range(struct __sk_buff, tc_index): 3687 case bpf_ctx_range(struct __sk_buff, tc_index):
3688 case bpf_ctx_range(struct __sk_buff, priority): 3688 case bpf_ctx_range(struct __sk_buff, priority):
3689 break; 3689 break;
@@ -3693,6 +3693,7 @@ static bool sk_skb_is_valid_access(int off, int size,
3693 } 3693 }
3694 3694
3695 switch (off) { 3695 switch (off) {
3696 case bpf_ctx_range(struct __sk_buff, mark):
3696 case bpf_ctx_range(struct __sk_buff, tc_classid): 3697 case bpf_ctx_range(struct __sk_buff, tc_classid):
3697 return false; 3698 return false;
3698 case bpf_ctx_range(struct __sk_buff, data): 3699 case bpf_ctx_range(struct __sk_buff, data):
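
Annotation: the filter.c hunks move the sockmap redirect state out of the per-CPU redirect_info and into the skb's control block, so bpf_sk_redirect_map() now takes the skb as its first argument (mirrored by the samples/sockmap update later in this patch). A minimal BPF-side sketch, assuming the matching bpf_helpers.h prototype; the section name, map sizing and fixed key 0 are illustrative:

#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") sock_map = {
        .type = BPF_MAP_TYPE_SOCKMAP,
        .key_size = sizeof(int),
        .value_size = sizeof(int),
        .max_entries = 20,
};

SEC("sk_skb2")
int example_verdict_prog(struct __sk_buff *skb)
{
        /* Redirect every matching skb to the socket stored at map index 0;
         * the skb is now the first argument of the helper. */
        return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
}

char _license[] SEC("license") = "GPL";
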
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d4bcdcc68e92..5ace48926b19 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1483,7 +1483,10 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1483 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 1483 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1484 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 1484 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1485 [IFLA_NET_NS_FD] = { .type = NLA_U32 }, 1485 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1486 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, 1486 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1487 * allow 0-length string (needed to remove an alias).
1488 */
1489 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1487 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, 1490 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
1488 [IFLA_VF_PORTS] = { .type = NLA_NESTED }, 1491 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1489 [IFLA_PORT_SELF] = { .type = NLA_NESTED }, 1492 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
@@ -2093,7 +2096,7 @@ static int do_setlink(const struct sk_buff *skb,
2093 dev->tx_queue_len = orig_len; 2096 dev->tx_queue_len = orig_len;
2094 goto errout; 2097 goto errout;
2095 } 2098 }
2096 status |= DO_SETLINK_NOTIFY; 2099 status |= DO_SETLINK_MODIFIED;
2097 } 2100 }
2098 } 2101 }
2099 2102
@@ -2248,7 +2251,7 @@ static int do_setlink(const struct sk_buff *skb,
2248 2251
2249errout: 2252errout:
2250 if (status & DO_SETLINK_MODIFIED) { 2253 if (status & DO_SETLINK_MODIFIED) {
2251 if (status & DO_SETLINK_NOTIFY) 2254 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
2252 netdev_state_change(dev); 2255 netdev_state_change(dev);
2253 2256
2254 if (err < 0) 2257 if (err < 0)
@@ -4279,13 +4282,17 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
4279 4282
4280 switch (event) { 4283 switch (event) {
4281 case NETDEV_REBOOT: 4284 case NETDEV_REBOOT:
4285 case NETDEV_CHANGEMTU:
4282 case NETDEV_CHANGEADDR: 4286 case NETDEV_CHANGEADDR:
4283 case NETDEV_CHANGENAME: 4287 case NETDEV_CHANGENAME:
4284 case NETDEV_FEAT_CHANGE: 4288 case NETDEV_FEAT_CHANGE:
4285 case NETDEV_BONDING_FAILOVER: 4289 case NETDEV_BONDING_FAILOVER:
4290 case NETDEV_POST_TYPE_CHANGE:
4286 case NETDEV_NOTIFY_PEERS: 4291 case NETDEV_NOTIFY_PEERS:
4292 case NETDEV_CHANGEUPPER:
4287 case NETDEV_RESEND_IGMP: 4293 case NETDEV_RESEND_IGMP:
4288 case NETDEV_CHANGEINFODATA: 4294 case NETDEV_CHANGEINFODATA:
4295 case NETDEV_CHANGE_TX_QUEUE_LEN:
4289 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), 4296 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
4290 GFP_KERNEL); 4297 GFP_KERNEL);
4291 break; 4298 break;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 16982de649b9..24656076906d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1124,9 +1124,13 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1124 1124
1125 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); 1125 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
1126 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { 1126 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1127 struct sock *save_sk = skb->sk;
1128
1127 /* Streams do not free skb on error. Reset to prev state. */ 1129 /* Streams do not free skb on error. Reset to prev state. */
1128 msg->msg_iter = orig_iter; 1130 msg->msg_iter = orig_iter;
1131 skb->sk = sk;
1129 ___pskb_trim(skb, orig_len); 1132 ___pskb_trim(skb, orig_len);
1133 skb->sk = save_sk;
1130 return err; 1134 return err;
1131 } 1135 }
1132 1136
@@ -1896,7 +1900,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
1896 } 1900 }
1897 1901
1898 /* If we need update frag list, we are in troubles. 1902 /* If we need update frag list, we are in troubles.
1899 * Certainly, it possible to add an offset to skb data, 1903 * Certainly, it is possible to add an offset to skb data,
1900 * but taking into account that pulling is expected to 1904 * but taking into account that pulling is expected to
1901 * be very rare operation, it is worth to fight against 1905 * be very rare operation, it is worth to fight against
1902 * further bloating skb head and crucify ourselves here instead. 1906 * further bloating skb head and crucify ourselves here instead.
diff --git a/net/core/sock.c b/net/core/sock.c
index 23953b741a41..415f441c63b9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1677,12 +1677,17 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1677 newsk->sk_dst_pending_confirm = 0; 1677 newsk->sk_dst_pending_confirm = 0;
1678 newsk->sk_wmem_queued = 0; 1678 newsk->sk_wmem_queued = 0;
1679 newsk->sk_forward_alloc = 0; 1679 newsk->sk_forward_alloc = 0;
1680
1681 /* sk->sk_memcg will be populated at accept() time */
1682 newsk->sk_memcg = NULL;
1683
1680 atomic_set(&newsk->sk_drops, 0); 1684 atomic_set(&newsk->sk_drops, 0);
1681 newsk->sk_send_head = NULL; 1685 newsk->sk_send_head = NULL;
1682 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; 1686 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1683 atomic_set(&newsk->sk_zckey, 0); 1687 atomic_set(&newsk->sk_zckey, 0);
1684 1688
1685 sock_reset_flag(newsk, SOCK_DONE); 1689 sock_reset_flag(newsk, SOCK_DONE);
1690 cgroup_sk_alloc(&newsk->sk_cgrp_data);
1686 1691
1687 rcu_read_lock(); 1692 rcu_read_lock();
1688 filter = rcu_dereference(sk->sk_filter); 1693 filter = rcu_dereference(sk->sk_filter);
@@ -1714,9 +1719,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1714 newsk->sk_incoming_cpu = raw_smp_processor_id(); 1719 newsk->sk_incoming_cpu = raw_smp_processor_id();
1715 atomic64_set(&newsk->sk_cookie, 0); 1720 atomic64_set(&newsk->sk_cookie, 0);
1716 1721
1717 mem_cgroup_sk_alloc(newsk);
1718 cgroup_sk_alloc(&newsk->sk_cgrp_data);
1719
1720 /* 1722 /*
1721 * Before updating sk_refcnt, we must commit prior changes to memory 1723 * Before updating sk_refcnt, we must commit prior changes to memory
1722 * (Documentation/RCU/rculist_nulls.txt for details) 1724 * (Documentation/RCU/rculist_nulls.txt for details)
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index eed1ebf7f29d..b1e0dbea1e8c 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
36 * soft irq of receive path or setsockopt from process context 36 * soft irq of receive path or setsockopt from process context
37 */ 37 */
38 spin_lock_bh(&reuseport_lock); 38 spin_lock_bh(&reuseport_lock);
39 WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb, 39
40 lockdep_is_held(&reuseport_lock)), 40 /* Allocation attempts can occur concurrently via the setsockopt path
41 "multiple allocations for the same socket"); 41 * and the bind/hash path. Nothing to do when we lose the race.
42 */
43 if (rcu_dereference_protected(sk->sk_reuseport_cb,
44 lockdep_is_held(&reuseport_lock)))
45 goto out;
46
42 reuse = __reuseport_alloc(INIT_SOCKS); 47 reuse = __reuseport_alloc(INIT_SOCKS);
43 if (!reuse) { 48 if (!reuse) {
44 spin_unlock_bh(&reuseport_lock); 49 spin_unlock_bh(&reuseport_lock);
@@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
49 reuse->num_socks = 1; 54 reuse->num_socks = 1;
50 rcu_assign_pointer(sk->sk_reuseport_cb, reuse); 55 rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
51 56
57out:
52 spin_unlock_bh(&reuseport_lock); 58 spin_unlock_bh(&reuseport_lock);
53 59
54 return 0; 60 return 0;
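
Annotation: the reuseport_alloc() hunk moves the "already allocated?" check under reuseport_lock, so the racing setsockopt and bind/hash callers (see the inet_hashtables.c and udp.c hunks below) can both call it unconditionally. A generic sketch of that check-under-the-lock pattern, with illustrative names rather than the real sock_reuseport types:

#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct state { int refs; };

static DEFINE_SPINLOCK(example_lock);
static struct state __rcu *example_state;

static int example_alloc(void)
{
        struct state *st;

        spin_lock_bh(&example_lock);

        /* Lost the race with a concurrent allocator: nothing to do. */
        if (rcu_dereference_protected(example_state,
                                      lockdep_is_held(&example_lock)))
                goto out;

        st = kzalloc(sizeof(*st), GFP_ATOMIC);
        if (!st) {
                spin_unlock_bh(&example_lock);
                return -ENOMEM;
        }
        st->refs = 1;
        rcu_assign_pointer(example_state, st);
out:
        spin_unlock_bh(&example_lock);
        return 0;
}
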
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 001c08696334..0490916864f9 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
414 sk_daddr_set(newsk, ireq->ir_rmt_addr); 414 sk_daddr_set(newsk, ireq->ir_rmt_addr);
415 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); 415 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
416 newinet->inet_saddr = ireq->ir_loc_addr; 416 newinet->inet_saddr = ireq->ir_loc_addr;
417 newinet->inet_opt = ireq->opt; 417 RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
418 ireq->opt = NULL;
419 newinet->mc_index = inet_iif(skb); 418 newinet->mc_index = inet_iif(skb);
420 newinet->mc_ttl = ip_hdr(skb)->ttl; 419 newinet->mc_ttl = ip_hdr(skb)->ttl;
421 newinet->inet_id = jiffies; 420 newinet->inet_id = jiffies;
@@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
430 if (__inet_inherit_port(sk, newsk) < 0) 429 if (__inet_inherit_port(sk, newsk) < 0)
431 goto put_and_exit; 430 goto put_and_exit;
432 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); 431 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
433 432 if (*own_req)
433 ireq->ireq_opt = NULL;
434 else
435 newinet->inet_opt = NULL;
434 return newsk; 436 return newsk;
435 437
436exit_overflow: 438exit_overflow:
@@ -441,6 +443,7 @@ exit:
441 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); 443 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
442 return NULL; 444 return NULL;
443put_and_exit: 445put_and_exit:
446 newinet->inet_opt = NULL;
444 inet_csk_prepare_forced_close(newsk); 447 inet_csk_prepare_forced_close(newsk);
445 dccp_done(newsk); 448 dccp_done(newsk);
446 goto exit; 449 goto exit;
@@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
492 ireq->ir_rmt_addr); 495 ireq->ir_rmt_addr);
493 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 496 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
494 ireq->ir_rmt_addr, 497 ireq->ir_rmt_addr,
495 ireq->opt); 498 rcu_dereference(ireq->ireq_opt));
496 err = net_xmit_eval(err); 499 err = net_xmit_eval(err);
497 } 500 }
498 501
@@ -548,7 +551,7 @@ out:
548static void dccp_v4_reqsk_destructor(struct request_sock *req) 551static void dccp_v4_reqsk_destructor(struct request_sock *req)
549{ 552{
550 dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg); 553 dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
551 kfree(inet_rsk(req)->opt); 554 kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
552} 555}
553 556
554void dccp_syn_ack_timeout(const struct request_sock *req) 557void dccp_syn_ack_timeout(const struct request_sock *req)
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 8737412c7b27..e1d4d898a007 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -224,7 +224,7 @@ static int dns_resolver_match_preparse(struct key_match_data *match_data)
224static void dns_resolver_describe(const struct key *key, struct seq_file *m) 224static void dns_resolver_describe(const struct key *key, struct seq_file *m)
225{ 225{
226 seq_puts(m, key->description); 226 seq_puts(m, key->description);
227 if (key_is_instantiated(key)) { 227 if (key_is_positive(key)) {
228 int err = PTR_ERR(key->payload.data[dns_key_error]); 228 int err = PTR_ERR(key->payload.data[dns_key_error]);
229 229
230 if (err) 230 if (err)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 91a2557942fa..f48fe6fc7e8c 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -70,11 +70,9 @@ config IP_MULTIPLE_TABLES
70 address into account. Furthermore, the TOS (Type-Of-Service) field 70 address into account. Furthermore, the TOS (Type-Of-Service) field
71 of the packet can be used for routing decisions as well. 71 of the packet can be used for routing decisions as well.
72 72
73 If you are interested in this, please see the preliminary 73 If you need more information, see the Linux Advanced
74 documentation at <http://www.compendium.com.ar/policy-routing.txt> 74 Routing and Traffic Control documentation at
75 and <ftp://post.tepkom.ru/pub/vol2/Linux/docs/advanced-routing.tex>. 75 <http://lartc.org/howto/lartc.rpdb.html>
76 You will need supporting software from
77 <ftp://ftp.tux.org/pub/net/ip-routing/>.
78 76
79 If unsure, say N. 77 If unsure, say N.
80 78
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2ae8f54cb321..82178cc69c96 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1951,7 +1951,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
1951 buf = NULL; 1951 buf = NULL;
1952 1952
1953 req_inet = inet_rsk(req); 1953 req_inet = inet_rsk(req);
1954 opt = xchg(&req_inet->opt, opt); 1954 opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
1955 if (opt) 1955 if (opt)
1956 kfree_rcu(opt, rcu); 1956 kfree_rcu(opt, rcu);
1957 1957
@@ -1973,11 +1973,13 @@ req_setattr_failure:
1973 * values on failure. 1973 * values on failure.
1974 * 1974 *
1975 */ 1975 */
1976static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) 1976static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
1977{ 1977{
1978 struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
1978 int hdr_delta = 0; 1979 int hdr_delta = 0;
1979 struct ip_options_rcu *opt = *opt_ptr;
1980 1980
1981 if (!opt || opt->opt.cipso == 0)
1982 return 0;
1981 if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { 1983 if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
1982 u8 cipso_len; 1984 u8 cipso_len;
1983 u8 cipso_off; 1985 u8 cipso_off;
@@ -2039,14 +2041,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
2039 */ 2041 */
2040void cipso_v4_sock_delattr(struct sock *sk) 2042void cipso_v4_sock_delattr(struct sock *sk)
2041{ 2043{
2042 int hdr_delta;
2043 struct ip_options_rcu *opt;
2044 struct inet_sock *sk_inet; 2044 struct inet_sock *sk_inet;
2045 int hdr_delta;
2045 2046
2046 sk_inet = inet_sk(sk); 2047 sk_inet = inet_sk(sk);
2047 opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
2048 if (!opt || opt->opt.cipso == 0)
2049 return;
2050 2048
2051 hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); 2049 hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
2052 if (sk_inet->is_icsk && hdr_delta > 0) { 2050 if (sk_inet->is_icsk && hdr_delta > 0) {
@@ -2066,15 +2064,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
2066 */ 2064 */
2067void cipso_v4_req_delattr(struct request_sock *req) 2065void cipso_v4_req_delattr(struct request_sock *req)
2068{ 2066{
2069 struct ip_options_rcu *opt; 2067 cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
2070 struct inet_request_sock *req_inet;
2071
2072 req_inet = inet_rsk(req);
2073 opt = req_inet->opt;
2074 if (!opt || opt->opt.cipso == 0)
2075 return;
2076
2077 cipso_v4_delopt(&req_inet->opt);
2078} 2068}
2079 2069
2080/** 2070/**
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index c039c937ba90..5ec9136a7c36 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -475,6 +475,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
475 } 475 }
476 spin_unlock_bh(&queue->fastopenq.lock); 476 spin_unlock_bh(&queue->fastopenq.lock);
477 } 477 }
478 mem_cgroup_sk_alloc(newsk);
478out: 479out:
479 release_sock(sk); 480 release_sock(sk);
480 if (req) 481 if (req)
@@ -539,9 +540,10 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
539{ 540{
540 const struct inet_request_sock *ireq = inet_rsk(req); 541 const struct inet_request_sock *ireq = inet_rsk(req);
541 struct net *net = read_pnet(&ireq->ireq_net); 542 struct net *net = read_pnet(&ireq->ireq_net);
542 struct ip_options_rcu *opt = ireq->opt; 543 struct ip_options_rcu *opt;
543 struct rtable *rt; 544 struct rtable *rt;
544 545
546 opt = rcu_dereference(ireq->ireq_opt);
545 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, 547 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
546 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 548 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
547 sk->sk_protocol, inet_sk_flowi_flags(sk), 549 sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -575,10 +577,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
575 struct flowi4 *fl4; 577 struct flowi4 *fl4;
576 struct rtable *rt; 578 struct rtable *rt;
577 579
580 opt = rcu_dereference(ireq->ireq_opt);
578 fl4 = &newinet->cork.fl.u.ip4; 581 fl4 = &newinet->cork.fl.u.ip4;
579 582
580 rcu_read_lock();
581 opt = rcu_dereference(newinet->inet_opt);
582 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, 583 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
583 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 584 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
584 sk->sk_protocol, inet_sk_flowi_flags(sk), 585 sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -591,13 +592,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
591 goto no_route; 592 goto no_route;
592 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) 593 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
593 goto route_err; 594 goto route_err;
594 rcu_read_unlock();
595 return &rt->dst; 595 return &rt->dst;
596 596
597route_err: 597route_err:
598 ip_rt_put(rt); 598 ip_rt_put(rt);
599no_route: 599no_route:
600 rcu_read_unlock();
601 __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); 600 __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
602 return NULL; 601 return NULL;
603} 602}
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 597bb4cfe805..e7d15fb0d94d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -456,10 +456,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
456 return reuseport_add_sock(sk, sk2); 456 return reuseport_add_sock(sk, sk2);
457 } 457 }
458 458
459 /* Initial allocation may have already happened via setsockopt */ 459 return reuseport_alloc(sk);
460 if (!rcu_access_pointer(sk->sk_reuseport_cb))
461 return reuseport_alloc(sk);
462 return 0;
463} 460}
464 461
465int __inet_hash(struct sock *sk, struct sock *osk) 462int __inet_hash(struct sock *sk, struct sock *osk)
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b1bb1b3a1082..77cf32a80952 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -355,7 +355,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
355 /* We throwed the options of the initial SYN away, so we hope 355 /* We throwed the options of the initial SYN away, so we hope
356 * the ACK carries the same options again (see RFC1122 4.2.3.8) 356 * the ACK carries the same options again (see RFC1122 4.2.3.8)
357 */ 357 */
358 ireq->opt = tcp_v4_save_options(sock_net(sk), skb); 358 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(sock_net(sk), skb));
359 359
360 if (security_inet_conn_request(sk, skb, req)) { 360 if (security_inet_conn_request(sk, skb, req)) {
361 reqsk_free(req); 361 reqsk_free(req);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c5d7656beeee..7eec3383702b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6196,7 +6196,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
6196 struct inet_request_sock *ireq = inet_rsk(req); 6196 struct inet_request_sock *ireq = inet_rsk(req);
6197 6197
6198 kmemcheck_annotate_bitfield(ireq, flags); 6198 kmemcheck_annotate_bitfield(ireq, flags);
6199 ireq->opt = NULL; 6199 ireq->ireq_opt = NULL;
6200#if IS_ENABLED(CONFIG_IPV6) 6200#if IS_ENABLED(CONFIG_IPV6)
6201 ireq->pktopts = NULL; 6201 ireq->pktopts = NULL;
6202#endif 6202#endif
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 85164d4d3e53..4c43365c374c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -877,7 +877,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
877 877
878 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 878 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
879 ireq->ir_rmt_addr, 879 ireq->ir_rmt_addr,
880 ireq->opt); 880 rcu_dereference(ireq->ireq_opt));
881 err = net_xmit_eval(err); 881 err = net_xmit_eval(err);
882 } 882 }
883 883
@@ -889,7 +889,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
889 */ 889 */
890static void tcp_v4_reqsk_destructor(struct request_sock *req) 890static void tcp_v4_reqsk_destructor(struct request_sock *req)
891{ 891{
892 kfree(inet_rsk(req)->opt); 892 kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
893} 893}
894 894
895#ifdef CONFIG_TCP_MD5SIG 895#ifdef CONFIG_TCP_MD5SIG
@@ -1265,10 +1265,11 @@ static void tcp_v4_init_req(struct request_sock *req,
1265 struct sk_buff *skb) 1265 struct sk_buff *skb)
1266{ 1266{
1267 struct inet_request_sock *ireq = inet_rsk(req); 1267 struct inet_request_sock *ireq = inet_rsk(req);
1268 struct net *net = sock_net(sk_listener);
1268 1269
1269 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); 1270 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1270 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); 1271 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1271 ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb); 1272 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1272} 1273}
1273 1274
1274static struct dst_entry *tcp_v4_route_req(const struct sock *sk, 1275static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
@@ -1355,10 +1356,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1355 sk_daddr_set(newsk, ireq->ir_rmt_addr); 1356 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1356 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); 1357 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1357 newsk->sk_bound_dev_if = ireq->ir_iif; 1358 newsk->sk_bound_dev_if = ireq->ir_iif;
1358 newinet->inet_saddr = ireq->ir_loc_addr; 1359 newinet->inet_saddr = ireq->ir_loc_addr;
1359 inet_opt = ireq->opt; 1360 inet_opt = rcu_dereference(ireq->ireq_opt);
1360 rcu_assign_pointer(newinet->inet_opt, inet_opt); 1361 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1361 ireq->opt = NULL;
1362 newinet->mc_index = inet_iif(skb); 1362 newinet->mc_index = inet_iif(skb);
1363 newinet->mc_ttl = ip_hdr(skb)->ttl; 1363 newinet->mc_ttl = ip_hdr(skb)->ttl;
1364 newinet->rcv_tos = ip_hdr(skb)->tos; 1364 newinet->rcv_tos = ip_hdr(skb)->tos;
@@ -1403,9 +1403,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1403 if (__inet_inherit_port(sk, newsk) < 0) 1403 if (__inet_inherit_port(sk, newsk) < 0)
1404 goto put_and_exit; 1404 goto put_and_exit;
1405 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); 1405 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1406 if (*own_req) 1406 if (likely(*own_req)) {
1407 tcp_move_syn(newtp, req); 1407 tcp_move_syn(newtp, req);
1408 1408 ireq->ireq_opt = NULL;
1409 } else {
1410 newinet->inet_opt = NULL;
1411 }
1409 return newsk; 1412 return newsk;
1410 1413
1411exit_overflow: 1414exit_overflow:
@@ -1416,6 +1419,7 @@ exit:
1416 tcp_listendrop(sk); 1419 tcp_listendrop(sk);
1417 return NULL; 1420 return NULL;
1418put_and_exit: 1421put_and_exit:
1422 newinet->inet_opt = NULL;
1419 inet_csk_prepare_forced_close(newsk); 1423 inet_csk_prepare_forced_close(newsk);
1420 tcp_done(newsk); 1424 tcp_done(newsk);
1421 goto exit; 1425 goto exit;
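
Annotation: the tcp_ipv4.c and dccp changes above annotate the request socket's saved IP options as an __rcu pointer (ireq_opt) and only transfer ownership to the child socket when inet_ehash_nolisten() reports *own_req. A hedged sketch of the accessor conventions such a conversion relies on, with illustrative structure names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct opts { int len; };

struct request {
        struct opts __rcu *opt;
};

/* Plain readers dereference under the RCU read lock. */
static int read_opt_len(struct request *req)
{
        struct opts *o;
        int len = 0;

        rcu_read_lock();
        o = rcu_dereference(req->opt);
        if (o)
                len = o->len;
        rcu_read_unlock();
        return len;
}

/* The destructor owns the last reference, so no readers can race
 * with it and rcu_dereference_protected(..., 1) is sufficient. */
static void destroy_request(struct request *req)
{
        kfree(rcu_dereference_protected(req->opt, 1));
}
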
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e45177ceb0ee..ebfbccae62fd 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -231,10 +231,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
231 } 231 }
232 } 232 }
233 233
234 /* Initial allocation may have already happened via setsockopt */ 234 return reuseport_alloc(sk);
235 if (!rcu_access_pointer(sk->sk_reuseport_cb))
236 return reuseport_alloc(sk);
237 return 0;
238} 235}
239 236
240/** 237/**
@@ -1061,7 +1058,7 @@ back_from_confirm:
1061 /* ... which is an evident application bug. --ANK */ 1058 /* ... which is an evident application bug. --ANK */
1062 release_sock(sk); 1059 release_sock(sk);
1063 1060
1064 net_dbg_ratelimited("cork app bug 2\n"); 1061 net_dbg_ratelimited("socket already corked\n");
1065 err = -EINVAL; 1062 err = -EINVAL;
1066 goto out; 1063 goto out;
1067 } 1064 }
@@ -1144,7 +1141,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
1144 if (unlikely(!up->pending)) { 1141 if (unlikely(!up->pending)) {
1145 release_sock(sk); 1142 release_sock(sk);
1146 1143
1147 net_dbg_ratelimited("udp cork app bug 3\n"); 1144 net_dbg_ratelimited("cork failed\n");
1148 return -EINVAL; 1145 return -EINVAL;
1149 } 1146 }
1150 1147
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 8081bafe441b..15535ee327c5 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
315 } 315 }
316 opt_space->dst1opt = fopt->dst1opt; 316 opt_space->dst1opt = fopt->dst1opt;
317 opt_space->opt_flen = fopt->opt_flen; 317 opt_space->opt_flen = fopt->opt_flen;
318 opt_space->tot_len = fopt->tot_len;
318 return opt_space; 319 return opt_space;
319} 320}
320EXPORT_SYMBOL_GPL(fl6_merge_options); 321EXPORT_SYMBOL_GPL(fl6_merge_options);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 43ca864327c7..5110a418cc4d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1161,11 +1161,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1161 if (WARN_ON(v6_cork->opt)) 1161 if (WARN_ON(v6_cork->opt))
1162 return -EINVAL; 1162 return -EINVAL;
1163 1163
1164 v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation); 1164 v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
1165 if (unlikely(!v6_cork->opt)) 1165 if (unlikely(!v6_cork->opt))
1166 return -ENOBUFS; 1166 return -ENOBUFS;
1167 1167
1168 v6_cork->opt->tot_len = opt->tot_len; 1168 v6_cork->opt->tot_len = sizeof(*opt);
1169 v6_cork->opt->opt_flen = opt->opt_flen; 1169 v6_cork->opt->opt_flen = opt->opt_flen;
1170 v6_cork->opt->opt_nflen = opt->opt_nflen; 1170 v6_cork->opt->opt_nflen = opt->opt_nflen;
1171 1171
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index bc6e8bfc5be4..f50452b919d5 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -988,6 +988,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
988 session->name, cmd, arg); 988 session->name, cmd, arg);
989 989
990 sk = ps->sock; 990 sk = ps->sock;
991 if (!sk)
992 return -EBADR;
993
991 sock_hold(sk); 994 sock_hold(sk);
992 995
993 switch (cmd) { 996 switch (cmd) {
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index a98fc2b5e0dc..ae995c8480db 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -4,7 +4,7 @@
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
6 * Copyright 2013-2014 Intel Mobile Communications GmbH 6 * Copyright 2013-2014 Intel Mobile Communications GmbH
7 * Copyright 2015 Intel Deutschland GmbH 7 * Copyright 2015-2017 Intel Deutschland GmbH
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
@@ -620,9 +620,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
620 620
621 pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; 621 pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
622 idx = key->conf.keyidx; 622 idx = key->conf.keyidx;
623 key->local = sdata->local;
624 key->sdata = sdata;
625 key->sta = sta;
626 623
627 mutex_lock(&sdata->local->key_mtx); 624 mutex_lock(&sdata->local->key_mtx);
628 625
@@ -633,6 +630,21 @@ int ieee80211_key_link(struct ieee80211_key *key,
633 else 630 else
634 old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]); 631 old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
635 632
633 /*
634 * Silently accept key re-installation without really installing the
635 * new version of the key to avoid nonce reuse or replay issues.
636 */
637 if (old_key && key->conf.keylen == old_key->conf.keylen &&
638 !memcmp(key->conf.key, old_key->conf.key, key->conf.keylen)) {
639 ieee80211_key_free_unused(key);
640 ret = 0;
641 goto out;
642 }
643
644 key->local = sdata->local;
645 key->sdata = sdata;
646 key->sta = sta;
647
636 increment_tailroom_need_count(sdata); 648 increment_tailroom_need_count(sdata);
637 649
638 ieee80211_key_replace(sdata, sta, pairwise, old_key, key); 650 ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
@@ -648,6 +660,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
648 ret = 0; 660 ret = 0;
649 } 661 }
650 662
663 out:
651 mutex_unlock(&sdata->local->key_mtx); 664 mutex_unlock(&sdata->local->key_mtx);
652 665
653 return ret; 666 return ret;
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index af3d636534ef..d30f7bd741d0 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -286,6 +286,7 @@ struct ncsi_dev_priv {
286 struct work_struct work; /* For channel management */ 286 struct work_struct work; /* For channel management */
287 struct packet_type ptype; /* NCSI packet Rx handler */ 287 struct packet_type ptype; /* NCSI packet Rx handler */
288 struct list_head node; /* Form NCSI device list */ 288 struct list_head node; /* Form NCSI device list */
289#define NCSI_MAX_VLAN_VIDS 15
289 struct list_head vlan_vids; /* List of active VLAN IDs */ 290 struct list_head vlan_vids; /* List of active VLAN IDs */
290}; 291};
291 292
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index 6898e7229285..f135938bf781 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -187,7 +187,7 @@ static struct ncsi_aen_handler {
187} ncsi_aen_handlers[] = { 187} ncsi_aen_handlers[] = {
188 { NCSI_PKT_AEN_LSC, 12, ncsi_aen_handler_lsc }, 188 { NCSI_PKT_AEN_LSC, 12, ncsi_aen_handler_lsc },
189 { NCSI_PKT_AEN_CR, 4, ncsi_aen_handler_cr }, 189 { NCSI_PKT_AEN_CR, 4, ncsi_aen_handler_cr },
190 { NCSI_PKT_AEN_HNCDSC, 4, ncsi_aen_handler_hncdsc } 190 { NCSI_PKT_AEN_HNCDSC, 8, ncsi_aen_handler_hncdsc }
191}; 191};
192 192
193int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb) 193int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb)
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 3fd3c39e6278..28c42b22b748 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -189,6 +189,7 @@ static void ncsi_channel_monitor(unsigned long data)
189 struct ncsi_channel *nc = (struct ncsi_channel *)data; 189 struct ncsi_channel *nc = (struct ncsi_channel *)data;
190 struct ncsi_package *np = nc->package; 190 struct ncsi_package *np = nc->package;
191 struct ncsi_dev_priv *ndp = np->ndp; 191 struct ncsi_dev_priv *ndp = np->ndp;
192 struct ncsi_channel_mode *ncm;
192 struct ncsi_cmd_arg nca; 193 struct ncsi_cmd_arg nca;
193 bool enabled, chained; 194 bool enabled, chained;
194 unsigned int monitor_state; 195 unsigned int monitor_state;
@@ -202,11 +203,15 @@ static void ncsi_channel_monitor(unsigned long data)
202 monitor_state = nc->monitor.state; 203 monitor_state = nc->monitor.state;
203 spin_unlock_irqrestore(&nc->lock, flags); 204 spin_unlock_irqrestore(&nc->lock, flags);
204 205
205 if (!enabled || chained) 206 if (!enabled || chained) {
207 ncsi_stop_channel_monitor(nc);
206 return; 208 return;
209 }
207 if (state != NCSI_CHANNEL_INACTIVE && 210 if (state != NCSI_CHANNEL_INACTIVE &&
208 state != NCSI_CHANNEL_ACTIVE) 211 state != NCSI_CHANNEL_ACTIVE) {
212 ncsi_stop_channel_monitor(nc);
209 return; 213 return;
214 }
210 215
211 switch (monitor_state) { 216 switch (monitor_state) {
212 case NCSI_CHANNEL_MONITOR_START: 217 case NCSI_CHANNEL_MONITOR_START:
@@ -217,28 +222,28 @@ static void ncsi_channel_monitor(unsigned long data)
217 nca.type = NCSI_PKT_CMD_GLS; 222 nca.type = NCSI_PKT_CMD_GLS;
218 nca.req_flags = 0; 223 nca.req_flags = 0;
219 ret = ncsi_xmit_cmd(&nca); 224 ret = ncsi_xmit_cmd(&nca);
220 if (ret) { 225 if (ret)
221 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n", 226 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
222 ret); 227 ret);
223 return;
224 }
225
226 break; 228 break;
227 case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX: 229 case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
228 break; 230 break;
229 default: 231 default:
230 if (!(ndp->flags & NCSI_DEV_HWA) && 232 if (!(ndp->flags & NCSI_DEV_HWA)) {
231 state == NCSI_CHANNEL_ACTIVE) {
232 ncsi_report_link(ndp, true); 233 ncsi_report_link(ndp, true);
233 ndp->flags |= NCSI_DEV_RESHUFFLE; 234 ndp->flags |= NCSI_DEV_RESHUFFLE;
234 } 235 }
235 236
237 ncsi_stop_channel_monitor(nc);
238
239 ncm = &nc->modes[NCSI_MODE_LINK];
236 spin_lock_irqsave(&nc->lock, flags); 240 spin_lock_irqsave(&nc->lock, flags);
237 nc->state = NCSI_CHANNEL_INVISIBLE; 241 nc->state = NCSI_CHANNEL_INVISIBLE;
242 ncm->data[2] &= ~0x1;
238 spin_unlock_irqrestore(&nc->lock, flags); 243 spin_unlock_irqrestore(&nc->lock, flags);
239 244
240 spin_lock_irqsave(&ndp->lock, flags); 245 spin_lock_irqsave(&ndp->lock, flags);
241 nc->state = NCSI_CHANNEL_INACTIVE; 246 nc->state = NCSI_CHANNEL_ACTIVE;
242 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 247 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
243 spin_unlock_irqrestore(&ndp->lock, flags); 248 spin_unlock_irqrestore(&ndp->lock, flags);
244 ncsi_process_next_channel(ndp); 249 ncsi_process_next_channel(ndp);
@@ -732,6 +737,10 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
732 if (index < 0) { 737 if (index < 0) {
733 netdev_err(ndp->ndev.dev, 738 netdev_err(ndp->ndev.dev,
734 "Failed to add new VLAN tag, error %d\n", index); 739 "Failed to add new VLAN tag, error %d\n", index);
740 if (index == -ENOSPC)
741 netdev_err(ndp->ndev.dev,
742 "Channel %u already has all VLAN filters set\n",
743 nc->id);
735 return -1; 744 return -1;
736 } 745 }
737 746
@@ -998,12 +1007,15 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
998 struct ncsi_package *np; 1007 struct ncsi_package *np;
999 struct ncsi_channel *nc; 1008 struct ncsi_channel *nc;
1000 unsigned int cap; 1009 unsigned int cap;
1010 bool has_channel = false;
1001 1011
1002 /* The hardware arbitration is disabled if any one channel 1012 /* The hardware arbitration is disabled if any one channel
1003 * doesn't support explicitly. 1013 * doesn't support explicitly.
1004 */ 1014 */
1005 NCSI_FOR_EACH_PACKAGE(ndp, np) { 1015 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1006 NCSI_FOR_EACH_CHANNEL(np, nc) { 1016 NCSI_FOR_EACH_CHANNEL(np, nc) {
1017 has_channel = true;
1018
1007 cap = nc->caps[NCSI_CAP_GENERIC].cap; 1019 cap = nc->caps[NCSI_CAP_GENERIC].cap;
1008 if (!(cap & NCSI_CAP_GENERIC_HWA) || 1020 if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1009 (cap & NCSI_CAP_GENERIC_HWA_MASK) != 1021 (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
@@ -1014,8 +1026,13 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1014 } 1026 }
1015 } 1027 }
1016 1028
1017 ndp->flags |= NCSI_DEV_HWA; 1029 if (has_channel) {
1018 return true; 1030 ndp->flags |= NCSI_DEV_HWA;
1031 return true;
1032 }
1033
1034 ndp->flags &= ~NCSI_DEV_HWA;
1035 return false;
1019} 1036}
1020 1037
1021static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp) 1038static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
@@ -1403,7 +1420,6 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1403 1420
1404int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 1421int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1405{ 1422{
1406 struct ncsi_channel_filter *ncf;
1407 struct ncsi_dev_priv *ndp; 1423 struct ncsi_dev_priv *ndp;
1408 unsigned int n_vids = 0; 1424 unsigned int n_vids = 0;
1409 struct vlan_vid *vlan; 1425 struct vlan_vid *vlan;
@@ -1420,7 +1436,6 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1420 } 1436 }
1421 1437
1422 ndp = TO_NCSI_DEV_PRIV(nd); 1438 ndp = TO_NCSI_DEV_PRIV(nd);
1423 ncf = ndp->hot_channel->filters[NCSI_FILTER_VLAN];
1424 1439
1425 /* Add the VLAN id to our internal list */ 1440 /* Add the VLAN id to our internal list */
1426 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) { 1441 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
@@ -1431,12 +1446,11 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1431 return 0; 1446 return 0;
1432 } 1447 }
1433 } 1448 }
1434 1449 if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1435 if (n_vids >= ncf->total) { 1450 netdev_warn(dev,
1436 netdev_info(dev, 1451 "tried to add vlan id %u but NCSI max already registered (%u)\n",
1437 "NCSI Channel supports up to %u VLAN tags but %u are already set\n", 1452 vid, NCSI_MAX_VLAN_VIDS);
1438 ncf->total, n_vids); 1453 return -ENOSPC;
1439 return -EINVAL;
1440 } 1454 }
1441 1455
1442 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 1456 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 265b9a892d41..927dad4759d1 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -959,7 +959,7 @@ static struct ncsi_rsp_handler {
959 { NCSI_PKT_RSP_EGMF, 4, ncsi_rsp_handler_egmf }, 959 { NCSI_PKT_RSP_EGMF, 4, ncsi_rsp_handler_egmf },
960 { NCSI_PKT_RSP_DGMF, 4, ncsi_rsp_handler_dgmf }, 960 { NCSI_PKT_RSP_DGMF, 4, ncsi_rsp_handler_dgmf },
961 { NCSI_PKT_RSP_SNFC, 4, ncsi_rsp_handler_snfc }, 961 { NCSI_PKT_RSP_SNFC, 4, ncsi_rsp_handler_snfc },
962 { NCSI_PKT_RSP_GVI, 36, ncsi_rsp_handler_gvi }, 962 { NCSI_PKT_RSP_GVI, 40, ncsi_rsp_handler_gvi },
963 { NCSI_PKT_RSP_GC, 32, ncsi_rsp_handler_gc }, 963 { NCSI_PKT_RSP_GC, 32, ncsi_rsp_handler_gc },
964 { NCSI_PKT_RSP_GP, -1, ncsi_rsp_handler_gp }, 964 { NCSI_PKT_RSP_GP, -1, ncsi_rsp_handler_gp },
965 { NCSI_PKT_RSP_GCPS, 172, ncsi_rsp_handler_gcps }, 965 { NCSI_PKT_RSP_GCPS, 172, ncsi_rsp_handler_gcps },
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f34750691c5c..b93148e8e9fb 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2307,6 +2307,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2307 size_t tlvlen = 0; 2307 size_t tlvlen = 0;
2308 struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk); 2308 struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
2309 unsigned int flags = 0; 2309 unsigned int flags = 0;
2310 bool nlk_has_extack = nlk->flags & NETLINK_F_EXT_ACK;
2310 2311
2311 /* Error messages get the original request appened, unless the user 2312 /* Error messages get the original request appened, unless the user
2312 * requests to cap the error message, and get extra error data if 2313 * requests to cap the error message, and get extra error data if
@@ -2317,7 +2318,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2317 payload += nlmsg_len(nlh); 2318 payload += nlmsg_len(nlh);
2318 else 2319 else
2319 flags |= NLM_F_CAPPED; 2320 flags |= NLM_F_CAPPED;
2320 if (nlk->flags & NETLINK_F_EXT_ACK && extack) { 2321 if (nlk_has_extack && extack) {
2321 if (extack->_msg) 2322 if (extack->_msg)
2322 tlvlen += nla_total_size(strlen(extack->_msg) + 1); 2323 tlvlen += nla_total_size(strlen(extack->_msg) + 1);
2323 if (extack->bad_attr) 2324 if (extack->bad_attr)
@@ -2326,8 +2327,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2326 } else { 2327 } else {
2327 flags |= NLM_F_CAPPED; 2328 flags |= NLM_F_CAPPED;
2328 2329
2329 if (nlk->flags & NETLINK_F_EXT_ACK && 2330 if (nlk_has_extack && extack && extack->cookie_len)
2330 extack && extack->cookie_len)
2331 tlvlen += nla_total_size(extack->cookie_len); 2331 tlvlen += nla_total_size(extack->cookie_len);
2332 } 2332 }
2333 2333
@@ -2355,7 +2355,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2355 errmsg->error = err; 2355 errmsg->error = err;
2356 memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh)); 2356 memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
2357 2357
2358 if (nlk->flags & NETLINK_F_EXT_ACK && extack) { 2358 if (nlk_has_extack && extack) {
2359 if (err) { 2359 if (err) {
2360 if (extack->_msg) 2360 if (extack->_msg)
2361 WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, 2361 WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index bec01a3daf5b..2986941164b1 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1769,7 +1769,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1769 1769
1770out: 1770out:
1771 if (err && rollover) { 1771 if (err && rollover) {
1772 kfree(rollover); 1772 kfree_rcu(rollover, rcu);
1773 po->rollover = NULL; 1773 po->rollover = NULL;
1774 } 1774 }
1775 mutex_unlock(&fanout_mutex); 1775 mutex_unlock(&fanout_mutex);
@@ -1796,8 +1796,10 @@ static struct packet_fanout *fanout_release(struct sock *sk)
1796 else 1796 else
1797 f = NULL; 1797 f = NULL;
1798 1798
1799 if (po->rollover) 1799 if (po->rollover) {
1800 kfree_rcu(po->rollover, rcu); 1800 kfree_rcu(po->rollover, rcu);
1801 po->rollover = NULL;
1802 }
1801 } 1803 }
1802 mutex_unlock(&fanout_mutex); 1804 mutex_unlock(&fanout_mutex);
1803 1805
@@ -3851,6 +3853,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3851 void *data = &val; 3853 void *data = &val;
3852 union tpacket_stats_u st; 3854 union tpacket_stats_u st;
3853 struct tpacket_rollover_stats rstats; 3855 struct tpacket_rollover_stats rstats;
3856 struct packet_rollover *rollover;
3854 3857
3855 if (level != SOL_PACKET) 3858 if (level != SOL_PACKET)
3856 return -ENOPROTOOPT; 3859 return -ENOPROTOOPT;
@@ -3929,13 +3932,18 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3929 0); 3932 0);
3930 break; 3933 break;
3931 case PACKET_ROLLOVER_STATS: 3934 case PACKET_ROLLOVER_STATS:
3932 if (!po->rollover) 3935 rcu_read_lock();
3936 rollover = rcu_dereference(po->rollover);
3937 if (rollover) {
3938 rstats.tp_all = atomic_long_read(&rollover->num);
3939 rstats.tp_huge = atomic_long_read(&rollover->num_huge);
3940 rstats.tp_failed = atomic_long_read(&rollover->num_failed);
3941 data = &rstats;
3942 lv = sizeof(rstats);
3943 }
3944 rcu_read_unlock();
3945 if (!rollover)
3933 return -EINVAL; 3946 return -EINVAL;
3934 rstats.tp_all = atomic_long_read(&po->rollover->num);
3935 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
3936 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
3937 data = &rstats;
3938 lv = sizeof(rstats);
3939 break; 3947 break;
3940 case PACKET_TX_HAS_OFF: 3948 case PACKET_TX_HAS_OFF:
3941 val = po->tp_tx_has_off; 3949 val = po->tp_tx_has_off;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index fb17552fd292..4b0a8288c98a 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -308,10 +308,11 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
308 call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len, 308 call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
309 gfp); 309 gfp);
310 /* The socket has been unlocked. */ 310 /* The socket has been unlocked. */
311 if (!IS_ERR(call)) 311 if (!IS_ERR(call)) {
312 call->notify_rx = notify_rx; 312 call->notify_rx = notify_rx;
313 mutex_unlock(&call->user_mutex);
314 }
313 315
314 mutex_unlock(&call->user_mutex);
315 _leave(" = %p", call); 316 _leave(" = %p", call);
316 return call; 317 return call;
317} 318}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index d230cb4c8094..b480d7c792ba 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -234,6 +234,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
234 tc_cls_common_offload_init(&cls_flower.common, tp); 234 tc_cls_common_offload_init(&cls_flower.common, tp);
235 cls_flower.command = TC_CLSFLOWER_DESTROY; 235 cls_flower.command = TC_CLSFLOWER_DESTROY;
236 cls_flower.cookie = (unsigned long) f; 236 cls_flower.cookie = (unsigned long) f;
237 cls_flower.egress_dev = f->hw_dev != tp->q->dev_queue->dev;
237 238
238 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower); 239 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower);
239} 240}
@@ -289,6 +290,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
289 cls_flower.command = TC_CLSFLOWER_STATS; 290 cls_flower.command = TC_CLSFLOWER_STATS;
290 cls_flower.cookie = (unsigned long) f; 291 cls_flower.cookie = (unsigned long) f;
291 cls_flower.exts = &f->exts; 292 cls_flower.exts = &f->exts;
293 cls_flower.egress_dev = f->hw_dev != tp->q->dev_queue->dev;
292 294
293 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, 295 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
294 &cls_flower); 296 &cls_flower);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 92a07141fd07..34f10e75f3b9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -421,7 +421,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
421{ 421{
422 struct dst_entry *dst; 422 struct dst_entry *dst;
423 423
424 if (!t) 424 if (sock_owned_by_user(sk) || !t)
425 return; 425 return;
426 dst = sctp_transport_dst_check(t); 426 dst = sctp_transport_dst_check(t);
427 if (dst) 427 if (dst)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d4730ada7f32..17841ab30798 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4906,6 +4906,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
4906 struct socket *sock; 4906 struct socket *sock;
4907 int err = 0; 4907 int err = 0;
4908 4908
4909 /* Do not peel off from one netns to another one. */
4910 if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
4911 return -EINVAL;
4912
4909 if (!asoc) 4913 if (!asoc)
4910 return -EINVAL; 4914 return -EINVAL;
4911 4915
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 14ed5a344cdf..e21991fe883a 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -310,11 +310,15 @@ static void hvs_close_connection(struct vmbus_channel *chan)
310 struct sock *sk = get_per_channel_state(chan); 310 struct sock *sk = get_per_channel_state(chan);
311 struct vsock_sock *vsk = vsock_sk(sk); 311 struct vsock_sock *vsk = vsock_sk(sk);
312 312
313 lock_sock(sk);
314
313 sk->sk_state = SS_UNCONNECTED; 315 sk->sk_state = SS_UNCONNECTED;
314 sock_set_flag(sk, SOCK_DONE); 316 sock_set_flag(sk, SOCK_DONE);
315 vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN; 317 vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;
316 318
317 sk->sk_state_change(sk); 319 sk->sk_state_change(sk);
320
321 release_sock(sk);
318} 322}
319 323
320static void hvs_open_connection(struct vmbus_channel *chan) 324static void hvs_open_connection(struct vmbus_channel *chan)
@@ -344,6 +348,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
344 if (!sk) 348 if (!sk)
345 return; 349 return;
346 350
351 lock_sock(sk);
352
347 if ((conn_from_host && sk->sk_state != VSOCK_SS_LISTEN) || 353 if ((conn_from_host && sk->sk_state != VSOCK_SS_LISTEN) ||
348 (!conn_from_host && sk->sk_state != SS_CONNECTING)) 354 (!conn_from_host && sk->sk_state != SS_CONNECTING))
349 goto out; 355 goto out;
@@ -395,9 +401,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
395 401
396 vsock_insert_connected(vnew); 402 vsock_insert_connected(vnew);
397 403
398 lock_sock(sk);
399 vsock_enqueue_accept(sk, new); 404 vsock_enqueue_accept(sk, new);
400 release_sock(sk);
401 } else { 405 } else {
402 sk->sk_state = SS_CONNECTED; 406 sk->sk_state = SS_CONNECTED;
403 sk->sk_socket->state = SS_CONNECTED; 407 sk->sk_socket->state = SS_CONNECTED;
@@ -410,6 +414,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
410out: 414out:
411 /* Release refcnt obtained when we called vsock_find_bound_socket() */ 415 /* Release refcnt obtained when we called vsock_find_bound_socket() */
412 sock_put(sk); 416 sock_put(sk);
417
418 release_sock(sk);
413} 419}
414 420
415static u32 hvs_get_local_cid(void) 421static u32 hvs_get_local_cid(void)
@@ -476,13 +482,21 @@ out:
476 482
477static void hvs_release(struct vsock_sock *vsk) 483static void hvs_release(struct vsock_sock *vsk)
478{ 484{
485 struct sock *sk = sk_vsock(vsk);
479 struct hvsock *hvs = vsk->trans; 486 struct hvsock *hvs = vsk->trans;
480 struct vmbus_channel *chan = hvs->chan; 487 struct vmbus_channel *chan;
481 488
489 lock_sock(sk);
490
491 sk->sk_state = SS_DISCONNECTING;
492 vsock_remove_sock(vsk);
493
494 release_sock(sk);
495
496 chan = hvs->chan;
482 if (chan) 497 if (chan)
483 hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN); 498 hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN);
484 499
485 vsock_remove_sock(vsk);
486} 500}
487 501
488static void hvs_destruct(struct vsock_sock *vsk) 502static void hvs_destruct(struct vsock_sock *vsk)
diff --git a/samples/sockmap/sockmap_kern.c b/samples/sockmap/sockmap_kern.c
index f9b38ef82dc2..52b0053274f4 100644
--- a/samples/sockmap/sockmap_kern.c
+++ b/samples/sockmap/sockmap_kern.c
@@ -62,7 +62,7 @@ int bpf_prog2(struct __sk_buff *skb)
62 ret = 1; 62 ret = 1;
63 63
64 bpf_printk("sockmap: %d -> %d @ %d\n", lport, bpf_ntohl(rport), ret); 64 bpf_printk("sockmap: %d -> %d @ %d\n", lport, bpf_ntohl(rport), ret);
65 return bpf_sk_redirect_map(&sock_map, ret, 0); 65 return bpf_sk_redirect_map(skb, &sock_map, ret, 0);
66} 66}
67 67
68SEC("sockops") 68SEC("sockops")
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index bc7fcf010a5b..446beb7ac48d 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -78,29 +78,37 @@ static int simple_thread_fn(void *arg)
78} 78}
79 79
80static DEFINE_MUTEX(thread_mutex); 80static DEFINE_MUTEX(thread_mutex);
 81static int simple_thread_cnt;
81 82
82int foo_bar_reg(void) 83int foo_bar_reg(void)
83{ 84{
85 mutex_lock(&thread_mutex);
86 if (simple_thread_cnt++)
87 goto out;
88
84 pr_info("Starting thread for foo_bar_fn\n"); 89 pr_info("Starting thread for foo_bar_fn\n");
85 /* 90 /*
86 * We shouldn't be able to start a trace when the module is 91 * We shouldn't be able to start a trace when the module is
87 * unloading (there's other locks to prevent that). But 92 * unloading (there's other locks to prevent that). But
88 * for consistency sake, we still take the thread_mutex. 93 * for consistency sake, we still take the thread_mutex.
89 */ 94 */
90 mutex_lock(&thread_mutex);
91 simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn"); 95 simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
96 out:
92 mutex_unlock(&thread_mutex); 97 mutex_unlock(&thread_mutex);
93 return 0; 98 return 0;
94} 99}
95 100
96void foo_bar_unreg(void) 101void foo_bar_unreg(void)
97{ 102{
98 pr_info("Killing thread for foo_bar_fn\n");
99 /* protect against module unloading */
100 mutex_lock(&thread_mutex); 103 mutex_lock(&thread_mutex);
104 if (--simple_thread_cnt)
105 goto out;
106
107 pr_info("Killing thread for foo_bar_fn\n");
101 if (simple_tsk_fn) 108 if (simple_tsk_fn)
102 kthread_stop(simple_tsk_fn); 109 kthread_stop(simple_tsk_fn);
103 simple_tsk_fn = NULL; 110 simple_tsk_fn = NULL;
111 out:
104 mutex_unlock(&thread_mutex); 112 mutex_unlock(&thread_mutex);
105} 113}
106 114
diff --git a/security/commoncap.c b/security/commoncap.c
index c25e0d27537f..fc46f5b85251 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -585,13 +585,14 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
585 struct vfs_ns_cap_data data, *nscaps = &data; 585 struct vfs_ns_cap_data data, *nscaps = &data;
586 struct vfs_cap_data *caps = (struct vfs_cap_data *) &data; 586 struct vfs_cap_data *caps = (struct vfs_cap_data *) &data;
587 kuid_t rootkuid; 587 kuid_t rootkuid;
588 struct user_namespace *fs_ns = inode->i_sb->s_user_ns; 588 struct user_namespace *fs_ns;
589 589
590 memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data)); 590 memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));
591 591
592 if (!inode) 592 if (!inode)
593 return -ENODATA; 593 return -ENODATA;
594 594
595 fs_ns = inode->i_sb->s_user_ns;
595 size = __vfs_getxattr((struct dentry *)dentry, inode, 596 size = __vfs_getxattr((struct dentry *)dentry, inode,
596 XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ); 597 XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ);
597 if (size == -ENODATA || size == -EOPNOTSUPP) 598 if (size == -ENODATA || size == -EOPNOTSUPP)
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index 91eafada3164..6462e6654ccf 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -45,6 +45,7 @@ config BIG_KEYS
45 bool "Large payload keys" 45 bool "Large payload keys"
46 depends on KEYS 46 depends on KEYS
47 depends on TMPFS 47 depends on TMPFS
48 select CRYPTO
48 select CRYPTO_AES 49 select CRYPTO_AES
49 select CRYPTO_GCM 50 select CRYPTO_GCM
50 help 51 help
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index e607830b6154..929e14978c42 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -247,7 +247,7 @@ void big_key_revoke(struct key *key)
247 247
248 /* clear the quota */ 248 /* clear the quota */
249 key_payload_reserve(key, 0); 249 key_payload_reserve(key, 0);
250 if (key_is_instantiated(key) && 250 if (key_is_positive(key) &&
251 (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD) 251 (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
252 vfs_truncate(path, 0); 252 vfs_truncate(path, 0);
253} 253}
@@ -279,7 +279,7 @@ void big_key_describe(const struct key *key, struct seq_file *m)
279 279
280 seq_puts(m, key->description); 280 seq_puts(m, key->description);
281 281
282 if (key_is_instantiated(key)) 282 if (key_is_positive(key))
283 seq_printf(m, ": %zu [%s]", 283 seq_printf(m, ": %zu [%s]",
284 datalen, 284 datalen,
285 datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff"); 285 datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 69855ba0d3b3..d92cbf9687c3 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -309,6 +309,13 @@ static struct key *request_user_key(const char *master_desc, const u8 **master_k
309 309
310 down_read(&ukey->sem); 310 down_read(&ukey->sem);
311 upayload = user_key_payload_locked(ukey); 311 upayload = user_key_payload_locked(ukey);
312 if (!upayload) {
313 /* key was revoked before we acquired its semaphore */
314 up_read(&ukey->sem);
315 key_put(ukey);
316 ukey = ERR_PTR(-EKEYREVOKED);
317 goto error;
318 }
312 *master_key = upayload->data; 319 *master_key = upayload->data;
313 *master_keylen = upayload->datalen; 320 *master_keylen = upayload->datalen;
314error: 321error:
@@ -847,7 +854,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
847 size_t datalen = prep->datalen; 854 size_t datalen = prep->datalen;
848 int ret = 0; 855 int ret = 0;
849 856
850 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) 857 if (key_is_negative(key))
851 return -ENOKEY; 858 return -ENOKEY;
852 if (datalen <= 0 || datalen > 32767 || !prep->data) 859 if (datalen <= 0 || datalen > 32767 || !prep->data)
853 return -EINVAL; 860 return -EINVAL;
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 87cb260e4890..f01d48cb3de1 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -129,15 +129,15 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
129 while (!list_empty(keys)) { 129 while (!list_empty(keys)) {
130 struct key *key = 130 struct key *key =
131 list_entry(keys->next, struct key, graveyard_link); 131 list_entry(keys->next, struct key, graveyard_link);
132 short state = key->state;
133
132 list_del(&key->graveyard_link); 134 list_del(&key->graveyard_link);
133 135
134 kdebug("- %u", key->serial); 136 kdebug("- %u", key->serial);
135 key_check(key); 137 key_check(key);
136 138
137 /* Throw away the key data if the key is instantiated */ 139 /* Throw away the key data if the key is instantiated */
138 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) && 140 if (state == KEY_IS_POSITIVE && key->type->destroy)
139 !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
140 key->type->destroy)
141 key->type->destroy(key); 141 key->type->destroy(key);
142 142
143 security_key_free(key); 143 security_key_free(key);
@@ -151,7 +151,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
151 } 151 }
152 152
153 atomic_dec(&key->user->nkeys); 153 atomic_dec(&key->user->nkeys);
154 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) 154 if (state != KEY_IS_UNINSTANTIATED)
155 atomic_dec(&key->user->nikeys); 155 atomic_dec(&key->user->nikeys);
156 156
157 key_user_put(key->user); 157 key_user_put(key->user);
diff --git a/security/keys/key.c b/security/keys/key.c
index eb914a838840..83bf4b4afd49 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -402,6 +402,18 @@ int key_payload_reserve(struct key *key, size_t datalen)
402EXPORT_SYMBOL(key_payload_reserve); 402EXPORT_SYMBOL(key_payload_reserve);
403 403
404/* 404/*
405 * Change the key state to being instantiated.
406 */
407static void mark_key_instantiated(struct key *key, int reject_error)
408{
409 /* Commit the payload before setting the state; barrier versus
410 * key_read_state().
411 */
412 smp_store_release(&key->state,
413 (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
414}
415
416/*
405 * Instantiate a key and link it into the target keyring atomically. Must be 417 * Instantiate a key and link it into the target keyring atomically. Must be
406 * called with the target keyring's semaphore writelocked. The target key's 418 * called with the target keyring's semaphore writelocked. The target key's
407 * semaphore need not be locked as instantiation is serialised by 419 * semaphore need not be locked as instantiation is serialised by
@@ -424,14 +436,14 @@ static int __key_instantiate_and_link(struct key *key,
424 mutex_lock(&key_construction_mutex); 436 mutex_lock(&key_construction_mutex);
425 437
426 /* can't instantiate twice */ 438 /* can't instantiate twice */
427 if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { 439 if (key->state == KEY_IS_UNINSTANTIATED) {
428 /* instantiate the key */ 440 /* instantiate the key */
429 ret = key->type->instantiate(key, prep); 441 ret = key->type->instantiate(key, prep);
430 442
431 if (ret == 0) { 443 if (ret == 0) {
432 /* mark the key as being instantiated */ 444 /* mark the key as being instantiated */
433 atomic_inc(&key->user->nikeys); 445 atomic_inc(&key->user->nikeys);
434 set_bit(KEY_FLAG_INSTANTIATED, &key->flags); 446 mark_key_instantiated(key, 0);
435 447
436 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) 448 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
437 awaken = 1; 449 awaken = 1;
@@ -577,13 +589,10 @@ int key_reject_and_link(struct key *key,
577 mutex_lock(&key_construction_mutex); 589 mutex_lock(&key_construction_mutex);
578 590
579 /* can't instantiate twice */ 591 /* can't instantiate twice */
580 if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { 592 if (key->state == KEY_IS_UNINSTANTIATED) {
581 /* mark the key as being negatively instantiated */ 593 /* mark the key as being negatively instantiated */
582 atomic_inc(&key->user->nikeys); 594 atomic_inc(&key->user->nikeys);
583 key->reject_error = -error; 595 mark_key_instantiated(key, -error);
584 smp_wmb();
585 set_bit(KEY_FLAG_NEGATIVE, &key->flags);
586 set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
587 now = current_kernel_time(); 596 now = current_kernel_time();
588 key->expiry = now.tv_sec + timeout; 597 key->expiry = now.tv_sec + timeout;
589 key_schedule_gc(key->expiry + key_gc_delay); 598 key_schedule_gc(key->expiry + key_gc_delay);
@@ -752,8 +761,8 @@ static inline key_ref_t __key_update(key_ref_t key_ref,
752 761
753 ret = key->type->update(key, prep); 762 ret = key->type->update(key, prep);
754 if (ret == 0) 763 if (ret == 0)
755 /* updating a negative key instantiates it */ 764 /* Updating a negative key positively instantiates it */
756 clear_bit(KEY_FLAG_NEGATIVE, &key->flags); 765 mark_key_instantiated(key, 0);
757 766
758 up_write(&key->sem); 767 up_write(&key->sem);
759 768
@@ -936,6 +945,16 @@ error:
936 */ 945 */
937 __key_link_end(keyring, &index_key, edit); 946 __key_link_end(keyring, &index_key, edit);
938 947
948 key = key_ref_to_ptr(key_ref);
949 if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
950 ret = wait_for_key_construction(key, true);
951 if (ret < 0) {
952 key_ref_put(key_ref);
953 key_ref = ERR_PTR(ret);
954 goto error_free_prep;
955 }
956 }
957
939 key_ref = __key_update(key_ref, &prep); 958 key_ref = __key_update(key_ref, &prep);
940 goto error_free_prep; 959 goto error_free_prep;
941} 960}
@@ -986,8 +1005,8 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
986 1005
987 ret = key->type->update(key, &prep); 1006 ret = key->type->update(key, &prep);
988 if (ret == 0) 1007 if (ret == 0)
989 /* updating a negative key instantiates it */ 1008 /* Updating a negative key positively instantiates it */
990 clear_bit(KEY_FLAG_NEGATIVE, &key->flags); 1009 mark_key_instantiated(key, 0);
991 1010
992 up_write(&key->sem); 1011 up_write(&key->sem);
993 1012
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 365ff85d7e27..76d22f726ae4 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -766,10 +766,9 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
766 766
767 key = key_ref_to_ptr(key_ref); 767 key = key_ref_to_ptr(key_ref);
768 768
769 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) { 769 ret = key_read_state(key);
770 ret = -ENOKEY; 770 if (ret < 0)
771 goto error2; 771 goto error2; /* Negatively instantiated */
772 }
773 772
774 /* see if we can read it directly */ 773 /* see if we can read it directly */
775 ret = key_permission(key_ref, KEY_NEED_READ); 774 ret = key_permission(key_ref, KEY_NEED_READ);
@@ -901,7 +900,7 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
901 atomic_dec(&key->user->nkeys); 900 atomic_dec(&key->user->nkeys);
902 atomic_inc(&newowner->nkeys); 901 atomic_inc(&newowner->nkeys);
903 902
904 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { 903 if (key->state != KEY_IS_UNINSTANTIATED) {
905 atomic_dec(&key->user->nikeys); 904 atomic_dec(&key->user->nikeys);
906 atomic_inc(&newowner->nikeys); 905 atomic_inc(&newowner->nikeys);
907 } 906 }
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 4fa82a8a9c0e..a7e51f793867 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -414,7 +414,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
414 else 414 else
415 seq_puts(m, "[anon]"); 415 seq_puts(m, "[anon]");
416 416
417 if (key_is_instantiated(keyring)) { 417 if (key_is_positive(keyring)) {
418 if (keyring->keys.nr_leaves_on_tree != 0) 418 if (keyring->keys.nr_leaves_on_tree != 0)
419 seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); 419 seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
420 else 420 else
@@ -553,7 +553,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
553{ 553{
554 struct keyring_search_context *ctx = iterator_data; 554 struct keyring_search_context *ctx = iterator_data;
555 const struct key *key = keyring_ptr_to_key(object); 555 const struct key *key = keyring_ptr_to_key(object);
556 unsigned long kflags = key->flags; 556 unsigned long kflags = READ_ONCE(key->flags);
557 short state = READ_ONCE(key->state);
557 558
558 kenter("{%d}", key->serial); 559 kenter("{%d}", key->serial);
559 560
@@ -565,6 +566,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
565 566
566 /* skip invalidated, revoked and expired keys */ 567 /* skip invalidated, revoked and expired keys */
567 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { 568 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
569 time_t expiry = READ_ONCE(key->expiry);
570
568 if (kflags & ((1 << KEY_FLAG_INVALIDATED) | 571 if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
569 (1 << KEY_FLAG_REVOKED))) { 572 (1 << KEY_FLAG_REVOKED))) {
570 ctx->result = ERR_PTR(-EKEYREVOKED); 573 ctx->result = ERR_PTR(-EKEYREVOKED);
@@ -572,7 +575,7 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
572 goto skipped; 575 goto skipped;
573 } 576 }
574 577
575 if (key->expiry && ctx->now.tv_sec >= key->expiry) { 578 if (expiry && ctx->now.tv_sec >= expiry) {
576 if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) 579 if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
577 ctx->result = ERR_PTR(-EKEYEXPIRED); 580 ctx->result = ERR_PTR(-EKEYEXPIRED);
578 kleave(" = %d [expire]", ctx->skipped_ret); 581 kleave(" = %d [expire]", ctx->skipped_ret);
@@ -597,9 +600,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
597 600
598 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { 601 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
599 /* we set a different error code if we pass a negative key */ 602 /* we set a different error code if we pass a negative key */
600 if (kflags & (1 << KEY_FLAG_NEGATIVE)) { 603 if (state < 0) {
601 smp_rmb(); 604 ctx->result = ERR_PTR(state);
602 ctx->result = ERR_PTR(key->reject_error);
603 kleave(" = %d [neg]", ctx->skipped_ret); 605 kleave(" = %d [neg]", ctx->skipped_ret);
604 goto skipped; 606 goto skipped;
605 } 607 }
diff --git a/security/keys/permission.c b/security/keys/permission.c
index 732cc0beffdf..a72b4dd70c8a 100644
--- a/security/keys/permission.c
+++ b/security/keys/permission.c
@@ -88,7 +88,8 @@ EXPORT_SYMBOL(key_task_permission);
88 */ 88 */
89int key_validate(const struct key *key) 89int key_validate(const struct key *key)
90{ 90{
91 unsigned long flags = key->flags; 91 unsigned long flags = READ_ONCE(key->flags);
92 time_t expiry = READ_ONCE(key->expiry);
92 93
93 if (flags & (1 << KEY_FLAG_INVALIDATED)) 94 if (flags & (1 << KEY_FLAG_INVALIDATED))
94 return -ENOKEY; 95 return -ENOKEY;
@@ -99,9 +100,9 @@ int key_validate(const struct key *key)
99 return -EKEYREVOKED; 100 return -EKEYREVOKED;
100 101
101 /* check it hasn't expired */ 102 /* check it hasn't expired */
102 if (key->expiry) { 103 if (expiry) {
103 struct timespec now = current_kernel_time(); 104 struct timespec now = current_kernel_time();
104 if (now.tv_sec >= key->expiry) 105 if (now.tv_sec >= expiry)
105 return -EKEYEXPIRED; 106 return -EKEYEXPIRED;
106 } 107 }
107 108
diff --git a/security/keys/proc.c b/security/keys/proc.c
index de834309d100..6d1fcbba1e09 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -179,9 +179,12 @@ static int proc_keys_show(struct seq_file *m, void *v)
179 struct rb_node *_p = v; 179 struct rb_node *_p = v;
180 struct key *key = rb_entry(_p, struct key, serial_node); 180 struct key *key = rb_entry(_p, struct key, serial_node);
181 struct timespec now; 181 struct timespec now;
182 time_t expiry;
182 unsigned long timo; 183 unsigned long timo;
184 unsigned long flags;
183 key_ref_t key_ref, skey_ref; 185 key_ref_t key_ref, skey_ref;
184 char xbuf[16]; 186 char xbuf[16];
187 short state;
185 int rc; 188 int rc;
186 189
187 struct keyring_search_context ctx = { 190 struct keyring_search_context ctx = {
@@ -217,12 +220,13 @@ static int proc_keys_show(struct seq_file *m, void *v)
217 rcu_read_lock(); 220 rcu_read_lock();
218 221
219 /* come up with a suitable timeout value */ 222 /* come up with a suitable timeout value */
220 if (key->expiry == 0) { 223 expiry = READ_ONCE(key->expiry);
224 if (expiry == 0) {
221 memcpy(xbuf, "perm", 5); 225 memcpy(xbuf, "perm", 5);
222 } else if (now.tv_sec >= key->expiry) { 226 } else if (now.tv_sec >= expiry) {
223 memcpy(xbuf, "expd", 5); 227 memcpy(xbuf, "expd", 5);
224 } else { 228 } else {
225 timo = key->expiry - now.tv_sec; 229 timo = expiry - now.tv_sec;
226 230
227 if (timo < 60) 231 if (timo < 60)
228 sprintf(xbuf, "%lus", timo); 232 sprintf(xbuf, "%lus", timo);
@@ -236,18 +240,21 @@ static int proc_keys_show(struct seq_file *m, void *v)
236 sprintf(xbuf, "%luw", timo / (60*60*24*7)); 240 sprintf(xbuf, "%luw", timo / (60*60*24*7));
237 } 241 }
238 242
239#define showflag(KEY, LETTER, FLAG) \ 243 state = key_read_state(key);
240 (test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')
241 244
245#define showflag(FLAGS, LETTER, FLAG) \
246 ((FLAGS & (1 << FLAG)) ? LETTER : '-')
247
248 flags = READ_ONCE(key->flags);
242 seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ", 249 seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
243 key->serial, 250 key->serial,
244 showflag(key, 'I', KEY_FLAG_INSTANTIATED), 251 state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
245 showflag(key, 'R', KEY_FLAG_REVOKED), 252 showflag(flags, 'R', KEY_FLAG_REVOKED),
246 showflag(key, 'D', KEY_FLAG_DEAD), 253 showflag(flags, 'D', KEY_FLAG_DEAD),
247 showflag(key, 'Q', KEY_FLAG_IN_QUOTA), 254 showflag(flags, 'Q', KEY_FLAG_IN_QUOTA),
248 showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT), 255 showflag(flags, 'U', KEY_FLAG_USER_CONSTRUCT),
249 showflag(key, 'N', KEY_FLAG_NEGATIVE), 256 state < 0 ? 'N' : '-',
250 showflag(key, 'i', KEY_FLAG_INVALIDATED), 257 showflag(flags, 'i', KEY_FLAG_INVALIDATED),
251 refcount_read(&key->usage), 258 refcount_read(&key->usage),
252 xbuf, 259 xbuf,
253 key->perm, 260 key->perm,
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 293d3598153b..740affd65ee9 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -730,7 +730,7 @@ try_again:
730 730
731 ret = -EIO; 731 ret = -EIO;
732 if (!(lflags & KEY_LOOKUP_PARTIAL) && 732 if (!(lflags & KEY_LOOKUP_PARTIAL) &&
733 !test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) 733 key_read_state(key) == KEY_IS_UNINSTANTIATED)
734 goto invalid_key; 734 goto invalid_key;
735 735
736 /* check the permissions */ 736 /* check the permissions */
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 63e63a42db3c..e8036cd0ad54 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -595,10 +595,9 @@ int wait_for_key_construction(struct key *key, bool intr)
595 intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); 595 intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
596 if (ret) 596 if (ret)
597 return -ERESTARTSYS; 597 return -ERESTARTSYS;
598 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) { 598 ret = key_read_state(key);
599 smp_rmb(); 599 if (ret < 0)
600 return key->reject_error; 600 return ret;
601 }
602 return key_validate(key); 601 return key_validate(key);
603} 602}
604EXPORT_SYMBOL(wait_for_key_construction); 603EXPORT_SYMBOL(wait_for_key_construction);
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 6ebf1af8fce9..424e1d90412e 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -73,7 +73,7 @@ static void request_key_auth_describe(const struct key *key,
73 73
74 seq_puts(m, "key:"); 74 seq_puts(m, "key:");
75 seq_puts(m, key->description); 75 seq_puts(m, key->description);
76 if (key_is_instantiated(key)) 76 if (key_is_positive(key))
77 seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); 77 seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
78} 78}
79 79
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index ddfaebf60fc8..bd85315cbfeb 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1066,7 +1066,7 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
1066 char *datablob; 1066 char *datablob;
1067 int ret = 0; 1067 int ret = 0;
1068 1068
1069 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) 1069 if (key_is_negative(key))
1070 return -ENOKEY; 1070 return -ENOKEY;
1071 p = key->payload.data[0]; 1071 p = key->payload.data[0];
1072 if (!p->migratable) 1072 if (!p->migratable)
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 3d8c68eba516..9f558bedba23 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -114,7 +114,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
114 114
115 /* attach the new data, displacing the old */ 115 /* attach the new data, displacing the old */
116 key->expiry = prep->expiry; 116 key->expiry = prep->expiry;
117 if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags)) 117 if (key_is_positive(key))
118 zap = dereference_key_locked(key); 118 zap = dereference_key_locked(key);
119 rcu_assign_keypointer(key, prep->payload.data[0]); 119 rcu_assign_keypointer(key, prep->payload.data[0]);
120 prep->payload.data[0] = NULL; 120 prep->payload.data[0] = NULL;
@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(user_destroy);
162void user_describe(const struct key *key, struct seq_file *m) 162void user_describe(const struct key *key, struct seq_file *m)
163{ 163{
164 seq_puts(m, key->description); 164 seq_puts(m, key->description);
165 if (key_is_instantiated(key)) 165 if (key_is_positive(key))
166 seq_printf(m, ": %u", key->datalen); 166 seq_printf(m, ": %u", key->datalen);
167} 167}
168 168
diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
index 0ff7926a5a69..cda64b489e42 100644
--- a/sound/core/seq/seq_lock.c
+++ b/sound/core/seq/seq_lock.c
@@ -23,8 +23,6 @@
23#include <sound/core.h> 23#include <sound/core.h>
24#include "seq_lock.h" 24#include "seq_lock.h"
25 25
26#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
27
28/* wait until all locks are released */ 26/* wait until all locks are released */
29void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line) 27void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
30{ 28{
@@ -41,5 +39,3 @@ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
41 } 39 }
42} 40}
43EXPORT_SYMBOL(snd_use_lock_sync_helper); 41EXPORT_SYMBOL(snd_use_lock_sync_helper);
44
45#endif
diff --git a/sound/core/seq/seq_lock.h b/sound/core/seq/seq_lock.h
index 54044bc2c9ef..ac38031c370e 100644
--- a/sound/core/seq/seq_lock.h
+++ b/sound/core/seq/seq_lock.h
@@ -3,8 +3,6 @@
3 3
4#include <linux/sched.h> 4#include <linux/sched.h>
5 5
6#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
7
8typedef atomic_t snd_use_lock_t; 6typedef atomic_t snd_use_lock_t;
9 7
10/* initialize lock */ 8/* initialize lock */
@@ -20,14 +18,4 @@ typedef atomic_t snd_use_lock_t;
20void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line); 18void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
21#define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__) 19#define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)
22 20
23#else /* SMP || CONFIG_SND_DEBUG */
24
25typedef spinlock_t snd_use_lock_t; /* dummy */
26#define snd_use_lock_init(lockp) /**/
27#define snd_use_lock_use(lockp) /**/
28#define snd_use_lock_free(lockp) /**/
29#define snd_use_lock_sync(lockp) /**/
30
31#endif /* SMP || CONFIG_SND_DEBUG */
32
33#endif /* __SND_SEQ_LOCK_H */ 21#endif /* __SND_SEQ_LOCK_H */
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 6c58e6f73a01..e43af18d4383 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -484,3 +484,34 @@ void snd_ctl_sync_vmaster(struct snd_kcontrol *kcontrol, bool hook_only)
484 master->hook(master->hook_private_data, master->val); 484 master->hook(master->hook_private_data, master->val);
485} 485}
486EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster); 486EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster);
487
488/**
489 * snd_ctl_apply_vmaster_slaves - Apply function to each vmaster slave
490 * @kctl: vmaster kctl element
491 * @func: function to apply
492 * @arg: optional function argument
493 *
494 * Apply the function @func to each slave kctl of the given vmaster kctl.
495 * Returns 0 if successful, or a negative error code.
496 */
497int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
498 int (*func)(struct snd_kcontrol *, void *),
499 void *arg)
500{
501 struct link_master *master;
502 struct link_slave *slave;
503 int err;
504
505 master = snd_kcontrol_chip(kctl);
506 err = master_init(master);
507 if (err < 0)
508 return err;
509 list_for_each_entry(slave, &master->slaves, list) {
510 err = func(&slave->slave, arg);
511 if (err < 0)
512 return err;
513 }
514
515 return 0;
516}
517EXPORT_SYMBOL_GPL(snd_ctl_apply_vmaster_slaves);
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 978dc1801b3a..f6d2985b2520 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -284,6 +284,11 @@ int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus)
284 dev_dbg(bus->dev, "HDA capability ID: 0x%x\n", 284 dev_dbg(bus->dev, "HDA capability ID: 0x%x\n",
285 (cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF); 285 (cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF);
286 286
287 if (cur_cap == -1) {
288 dev_dbg(bus->dev, "Invalid capability reg read\n");
289 break;
290 }
291
287 switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) { 292 switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) {
288 case AZX_ML_CAP_ID: 293 case AZX_ML_CAP_ID:
289 dev_dbg(bus->dev, "Found ML capability\n"); 294 dev_dbg(bus->dev, "Found ML capability\n");
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 3db26c451837..a0989d231fd0 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1803,36 +1803,6 @@ static int check_slave_present(struct hda_codec *codec,
1803 return 1; 1803 return 1;
1804} 1804}
1805 1805
1806/* guess the value corresponding to 0dB */
1807static int get_kctl_0dB_offset(struct hda_codec *codec,
1808 struct snd_kcontrol *kctl, int *step_to_check)
1809{
1810 int _tlv[4];
1811 const int *tlv = NULL;
1812 int val = -1;
1813
1814 if ((kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
1815 kctl->tlv.c == snd_hda_mixer_amp_tlv) {
1816 get_ctl_amp_tlv(kctl, _tlv);
1817 tlv = _tlv;
1818 } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
1819 tlv = kctl->tlv.p;
1820 if (tlv && tlv[0] == SNDRV_CTL_TLVT_DB_SCALE) {
1821 int step = tlv[3];
1822 step &= ~TLV_DB_SCALE_MUTE;
1823 if (!step)
1824 return -1;
1825 if (*step_to_check && *step_to_check != step) {
1826 codec_err(codec, "Mismatching dB step for vmaster slave (%d!=%d)\n",
1827			  *step_to_check, step);
1828 return -1;
1829 }
1830 *step_to_check = step;
1831 val = -tlv[2] / step;
1832 }
1833 return val;
1834}
1835
1836/* call kctl->put with the given value(s) */ 1806/* call kctl->put with the given value(s) */
1837static int put_kctl_with_value(struct snd_kcontrol *kctl, int val) 1807static int put_kctl_with_value(struct snd_kcontrol *kctl, int val)
1838{ 1808{
@@ -1847,19 +1817,58 @@ static int put_kctl_with_value(struct snd_kcontrol *kctl, int val)
1847 return 0; 1817 return 0;
1848} 1818}
1849 1819
1850/* initialize the slave volume with 0dB */ 1820struct slave_init_arg {
1851static int init_slave_0dB(struct hda_codec *codec, 1821 struct hda_codec *codec;
1852 void *data, struct snd_kcontrol *slave) 1822 int step;
1823};
1824
1825/* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */
1826static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
1853{ 1827{
1854 int offset = get_kctl_0dB_offset(codec, slave, data); 1828 struct slave_init_arg *arg = _arg;
1855 if (offset > 0) 1829 int _tlv[4];
1856 put_kctl_with_value(slave, offset); 1830 const int *tlv = NULL;
1831 int step;
1832 int val;
1833
1834 if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
1835 if (kctl->tlv.c != snd_hda_mixer_amp_tlv) {
1836 codec_err(arg->codec,
1837 "Unexpected TLV callback for slave %s:%d\n",
1838 kctl->id.name, kctl->id.index);
1839 return 0; /* ignore */
1840 }
1841 get_ctl_amp_tlv(kctl, _tlv);
1842 tlv = _tlv;
1843 } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
1844 tlv = kctl->tlv.p;
1845
1846 if (!tlv || tlv[0] != SNDRV_CTL_TLVT_DB_SCALE)
1847 return 0;
1848
1849 step = tlv[3];
1850 step &= ~TLV_DB_SCALE_MUTE;
1851 if (!step)
1852 return 0;
1853 if (arg->step && arg->step != step) {
1854 codec_err(arg->codec,
1855 "Mismatching dB step for vmaster slave (%d!=%d)\n",
1856 arg->step, step);
1857 return 0;
1858 }
1859
1860 arg->step = step;
1861 val = -tlv[2] / step;
1862 if (val > 0) {
1863 put_kctl_with_value(kctl, val);
1864 return val;
1865 }
1866
1857 return 0; 1867 return 0;
1858} 1868}
1859 1869
1860/* unmute the slave */ 1870/* unmute the slave via snd_ctl_apply_vmaster_slaves() */
1861static int init_slave_unmute(struct hda_codec *codec, 1871static int init_slave_unmute(struct snd_kcontrol *slave, void *_arg)
1862 void *data, struct snd_kcontrol *slave)
1863{ 1872{
1864 return put_kctl_with_value(slave, 1); 1873 return put_kctl_with_value(slave, 1);
1865} 1874}
@@ -1919,9 +1928,13 @@ int __snd_hda_add_vmaster(struct hda_codec *codec, char *name,
1919 /* init with master mute & zero volume */ 1928 /* init with master mute & zero volume */
1920 put_kctl_with_value(kctl, 0); 1929 put_kctl_with_value(kctl, 0);
1921 if (init_slave_vol) { 1930 if (init_slave_vol) {
1922 int step = 0; 1931 struct slave_init_arg arg = {
1923 map_slaves(codec, slaves, suffix, 1932 .codec = codec,
1924 tlv ? init_slave_0dB : init_slave_unmute, &step); 1933 .step = 0,
1934 };
1935 snd_ctl_apply_vmaster_slaves(kctl,
1936 tlv ? init_slave_0dB : init_slave_unmute,
1937 &arg);
1925 } 1938 }
1926 1939
1927 if (ctl_ret) 1940 if (ctl_ret)
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 9ddaae3784f5..4f5f18f22974 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1354,6 +1354,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1354 case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */ 1354 case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
1355 case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */ 1355 case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
1356 case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */ 1356 case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
1357 case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
1357 if (fp->altsetting == 2) 1358 if (fp->altsetting == 2)
1358 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1359 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1359 break; 1360 break;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index f90860d1f897..24b35a1fd4d6 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -569,9 +569,10 @@ union bpf_attr {
569 * @flags: reserved for future use 569 * @flags: reserved for future use
570 * Return: 0 on success or negative error code 570 * Return: 0 on success or negative error code
571 * 571 *
572 * int bpf_sk_redirect_map(map, key, flags) 572 * int bpf_sk_redirect_map(skb, map, key, flags)
573 * Redirect skb to a sock in map using key as a lookup key for the 573 * Redirect skb to a sock in map using key as a lookup key for the
574 * sock in map. 574 * sock in map.
575 * @skb: pointer to skb
575 * @map: pointer to sockmap 576 * @map: pointer to sockmap
576 * @key: key to lookup sock in map 577 * @key: key to lookup sock in map
577 * @flags: reserved for future use 578 * @flags: reserved for future use
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index a0c518ecf085..c0e26ad1fa7e 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -267,12 +267,13 @@ static int decode_instructions(struct objtool_file *file)
267 &insn->immediate, 267 &insn->immediate,
268 &insn->stack_op); 268 &insn->stack_op);
269 if (ret) 269 if (ret)
270 return ret; 270 goto err;
271 271
272 if (!insn->type || insn->type > INSN_LAST) { 272 if (!insn->type || insn->type > INSN_LAST) {
273 WARN_FUNC("invalid instruction type %d", 273 WARN_FUNC("invalid instruction type %d",
274 insn->sec, insn->offset, insn->type); 274 insn->sec, insn->offset, insn->type);
275 return -1; 275 ret = -1;
276 goto err;
276 } 277 }
277 278
278 hash_add(file->insn_hash, &insn->hash, insn->offset); 279 hash_add(file->insn_hash, &insn->hash, insn->offset);
@@ -296,6 +297,10 @@ static int decode_instructions(struct objtool_file *file)
296 } 297 }
297 298
298 return 0; 299 return 0;
300
301err:
302 free(insn);
303 return ret;
299} 304}
300 305
301/* 306/*
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index e397453e5a46..63526f4416ea 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -8,8 +8,8 @@ perf-record - Run a command and record its profile into perf.data
8SYNOPSIS 8SYNOPSIS
9-------- 9--------
10[verse] 10[verse]
11'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] <command> 11'perf record' [-e <EVENT> | --event=EVENT] [-a] <command>
12'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] -- <command> [<options>] 12'perf record' [-e <EVENT> | --event=EVENT] [-a] -- <command> [<options>]
13 13
14DESCRIPTION 14DESCRIPTION
15----------- 15-----------
diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
index 462fc755092e..7a84d73324e3 100755
--- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
@@ -10,6 +10,9 @@
10 10
11. $(dirname $0)/lib/probe.sh 11. $(dirname $0)/lib/probe.sh
12 12
13ld=$(realpath /lib64/ld*.so.* | uniq)
14libc=$(echo $ld | sed 's/ld/libc/g')
15
13trace_libc_inet_pton_backtrace() { 16trace_libc_inet_pton_backtrace() {
14 idx=0 17 idx=0
15 expected[0]="PING.*bytes" 18 expected[0]="PING.*bytes"
@@ -18,8 +21,8 @@ trace_libc_inet_pton_backtrace() {
18 expected[3]=".*packets transmitted.*" 21 expected[3]=".*packets transmitted.*"
19 expected[4]="rtt min.*" 22 expected[4]="rtt min.*"
20 expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)" 23 expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
21 expected[6]=".*inet_pton[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$" 24 expected[6]=".*inet_pton[[:space:]]\($libc\)$"
22 expected[7]="getaddrinfo[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$" 25 expected[7]="getaddrinfo[[:space:]]\($libc\)$"
23 expected[8]=".*\(.*/bin/ping.*\)$" 26 expected[8]=".*\(.*/bin/ping.*\)$"
24 27
25 perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do 28 perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
@@ -35,7 +38,7 @@ trace_libc_inet_pton_backtrace() {
35} 38}
36 39
37skip_if_no_perf_probe && \ 40skip_if_no_perf_probe && \
38perf probe -q /lib64/libc-*.so inet_pton && \ 41perf probe -q $libc inet_pton && \
39trace_libc_inet_pton_backtrace 42trace_libc_inet_pton_backtrace
40err=$? 43err=$?
41rm -f ${file} 44rm -f ${file}
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index ddb2c6fbdf91..db79017a6e56 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -532,7 +532,7 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
532 532
533void perf_hpp__column_unregister(struct perf_hpp_fmt *format) 533void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
534{ 534{
535 list_del(&format->list); 535 list_del_init(&format->list);
536} 536}
537 537
538void perf_hpp__cancel_cumulate(void) 538void perf_hpp__cancel_cumulate(void)
@@ -606,6 +606,13 @@ next:
606 606
607static void fmt_free(struct perf_hpp_fmt *fmt) 607static void fmt_free(struct perf_hpp_fmt *fmt)
608{ 608{
609 /*
610 * At this point fmt should be completely
611 * unhooked, if not it's a bug.
612 */
613 BUG_ON(!list_empty(&fmt->list));
614 BUG_ON(!list_empty(&fmt->sort_list));
615
609 if (fmt->free) 616 if (fmt->free)
610 fmt->free(fmt); 617 fmt->free(fmt);
611} 618}
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index c42edeac451f..dcfdafdc2f1c 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -8,6 +8,9 @@
8 8
9%{ 9%{
10#include <errno.h> 10#include <errno.h>
11#include <sys/types.h>
12#include <sys/stat.h>
13#include <unistd.h>
11#include "../perf.h" 14#include "../perf.h"
12#include "parse-events.h" 15#include "parse-events.h"
13#include "parse-events-bison.h" 16#include "parse-events-bison.h"
@@ -53,9 +56,8 @@ static int str(yyscan_t scanner, int token)
53 return token; 56 return token;
54} 57}
55 58
56static bool isbpf(yyscan_t scanner) 59static bool isbpf_suffix(char *text)
57{ 60{
58 char *text = parse_events_get_text(scanner);
59 int len = strlen(text); 61 int len = strlen(text);
60 62
61 if (len < 2) 63 if (len < 2)
@@ -68,6 +70,17 @@ static bool isbpf(yyscan_t scanner)
68 return false; 70 return false;
69} 71}
70 72
73static bool isbpf(yyscan_t scanner)
74{
75 char *text = parse_events_get_text(scanner);
76 struct stat st;
77
78 if (!isbpf_suffix(text))
79 return false;
80
81 return stat(text, &st) == 0;
82}
83
71/* 84/*
72 * This function is called when the parser gets two kind of input: 85 * This function is called when the parser gets two kind of input:
73 * 86 *
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index a7ebd9fe8e40..76ab0709a20c 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -374,6 +374,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
374 tool->mmap2 = process_event_stub; 374 tool->mmap2 = process_event_stub;
375 if (tool->comm == NULL) 375 if (tool->comm == NULL)
376 tool->comm = process_event_stub; 376 tool->comm = process_event_stub;
377 if (tool->namespaces == NULL)
378 tool->namespaces = process_event_stub;
377 if (tool->fork == NULL) 379 if (tool->fork == NULL)
378 tool->fork = process_event_stub; 380 tool->fork = process_event_stub;
379 if (tool->exit == NULL) 381 if (tool->exit == NULL)
diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h
index 4ba726c90870..54af60462130 100644
--- a/tools/perf/util/xyarray.h
+++ b/tools/perf/util/xyarray.h
@@ -23,12 +23,12 @@ static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
23 23
24static inline int xyarray__max_y(struct xyarray *xy) 24static inline int xyarray__max_y(struct xyarray *xy)
25{ 25{
26 return xy->max_x; 26 return xy->max_y;
27} 27}
28 28
29static inline int xyarray__max_x(struct xyarray *xy) 29static inline int xyarray__max_x(struct xyarray *xy)
30{ 30{
31 return xy->max_y; 31 return xy->max_x;
32} 32}
33 33
34#endif /* _PERF_XYARRAY_H_ */ 34#endif /* _PERF_XYARRAY_H_ */
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 0dafba2c1e7d..bd9c6b31a504 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -92,7 +92,6 @@ unsigned int do_ring_perf_limit_reasons;
92unsigned int crystal_hz; 92unsigned int crystal_hz;
93unsigned long long tsc_hz; 93unsigned long long tsc_hz;
94int base_cpu; 94int base_cpu;
95int do_migrate;
96double discover_bclk(unsigned int family, unsigned int model); 95double discover_bclk(unsigned int family, unsigned int model);
97unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */ 96unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
98 /* IA32_HWP_REQUEST, IA32_HWP_STATUS */ 97 /* IA32_HWP_REQUEST, IA32_HWP_STATUS */
@@ -303,9 +302,6 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
303 302
304int cpu_migrate(int cpu) 303int cpu_migrate(int cpu)
305{ 304{
306 if (!do_migrate)
307 return 0;
308
309 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); 305 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
310 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set); 306 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
311 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) 307 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
@@ -5007,7 +5003,6 @@ void cmdline(int argc, char **argv)
5007 {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help 5003 {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help
5008 {"Joules", no_argument, 0, 'J'}, 5004 {"Joules", no_argument, 0, 'J'},
5009 {"list", no_argument, 0, 'l'}, 5005 {"list", no_argument, 0, 'l'},
5010 {"migrate", no_argument, 0, 'm'},
5011 {"out", required_argument, 0, 'o'}, 5006 {"out", required_argument, 0, 'o'},
5012 {"quiet", no_argument, 0, 'q'}, 5007 {"quiet", no_argument, 0, 'q'},
5013 {"show", required_argument, 0, 's'}, 5008 {"show", required_argument, 0, 's'},
@@ -5019,7 +5014,7 @@ void cmdline(int argc, char **argv)
5019 5014
5020 progname = argv[0]; 5015 progname = argv[0];
5021 5016
5022 while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:Jmo:qST:v", 5017 while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:qST:v",
5023 long_options, &option_index)) != -1) { 5018 long_options, &option_index)) != -1) {
5024 switch (opt) { 5019 switch (opt) {
5025 case 'a': 5020 case 'a':
@@ -5062,9 +5057,6 @@ void cmdline(int argc, char **argv)
5062 list_header_only++; 5057 list_header_only++;
5063 quiet++; 5058 quiet++;
5064 break; 5059 break;
5065 case 'm':
5066 do_migrate = 1;
5067 break;
5068 case 'o': 5060 case 'o':
5069 outf = fopen_or_die(optarg, "w"); 5061 outf = fopen_or_die(optarg, "w");
5070 break; 5062 break;
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index 36fb9161b34a..b2e02bdcd098 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -65,7 +65,7 @@ static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
65static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, 65static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
66 int optlen) = 66 int optlen) =
67 (void *) BPF_FUNC_setsockopt; 67 (void *) BPF_FUNC_setsockopt;
68static int (*bpf_sk_redirect_map)(void *map, int key, int flags) = 68static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
69 (void *) BPF_FUNC_sk_redirect_map; 69 (void *) BPF_FUNC_sk_redirect_map;
70static int (*bpf_sock_map_update)(void *map, void *key, void *value, 70static int (*bpf_sock_map_update)(void *map, void *key, void *value,
71 unsigned long long flags) = 71 unsigned long long flags) =
diff --git a/tools/testing/selftests/bpf/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/sockmap_verdict_prog.c
index 9b99bd10807d..2cd2d552938b 100644
--- a/tools/testing/selftests/bpf/sockmap_verdict_prog.c
+++ b/tools/testing/selftests/bpf/sockmap_verdict_prog.c
@@ -61,8 +61,8 @@ int bpf_prog2(struct __sk_buff *skb)
61 bpf_printk("verdict: data[0] = redir(%u:%u)\n", map, sk); 61 bpf_printk("verdict: data[0] = redir(%u:%u)\n", map, sk);
62 62
63 if (!map) 63 if (!map)
64 return bpf_sk_redirect_map(&sock_map_rx, sk, 0); 64 return bpf_sk_redirect_map(skb, &sock_map_rx, sk, 0);
65 return bpf_sk_redirect_map(&sock_map_tx, sk, 0); 65 return bpf_sk_redirect_map(skb, &sock_map_tx, sk, 0);
66} 66}
67 67
68char _license[] SEC("license") = "GPL"; 68char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index fe3a443a1102..50ce52d2013d 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -466,7 +466,7 @@ static void test_sockmap(int tasks, void *data)
466 int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc; 466 int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc;
467 struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break; 467 struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break;
468 int ports[] = {50200, 50201, 50202, 50204}; 468 int ports[] = {50200, 50201, 50202, 50204};
469 int err, i, fd, sfd[6] = {0xdeadbeef}; 469 int err, i, fd, udp, sfd[6] = {0xdeadbeef};
470 u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0}; 470 u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0};
471 int parse_prog, verdict_prog; 471 int parse_prog, verdict_prog;
472 struct sockaddr_in addr; 472 struct sockaddr_in addr;
@@ -548,6 +548,16 @@ static void test_sockmap(int tasks, void *data)
548 goto out_sockmap; 548 goto out_sockmap;
549 } 549 }
550 550
551 /* Test update with unsupported UDP socket */
552 udp = socket(AF_INET, SOCK_DGRAM, 0);
553 i = 0;
554 err = bpf_map_update_elem(fd, &i, &udp, BPF_ANY);
555 if (!err) {
556 printf("Failed socket SOCK_DGRAM allowed '%i:%i'\n",
557 i, udp);
558 goto out_sockmap;
559 }
560
551 /* Test update without programs */ 561 /* Test update without programs */
552 for (i = 0; i < 6; i++) { 562 for (i = 0; i < 6; i++) {
553 err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); 563 err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 26f3250bdcd2..64ae21f64489 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1130,15 +1130,27 @@ static struct bpf_test tests[] = {
1130 .errstr = "invalid bpf_context access", 1130 .errstr = "invalid bpf_context access",
1131 }, 1131 },
1132 { 1132 {
1133 "check skb->mark is writeable by SK_SKB", 1133 "invalid access of skb->mark for SK_SKB",
1134 .insns = {
1135 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1136 offsetof(struct __sk_buff, mark)),
1137 BPF_EXIT_INSN(),
1138 },
1139 .result = REJECT,
1140 .prog_type = BPF_PROG_TYPE_SK_SKB,
1141 .errstr = "invalid bpf_context access",
1142 },
1143 {
1144 "check skb->mark is not writeable by SK_SKB",
1134 .insns = { 1145 .insns = {
1135 BPF_MOV64_IMM(BPF_REG_0, 0), 1146 BPF_MOV64_IMM(BPF_REG_0, 0),
1136 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1147 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1137 offsetof(struct __sk_buff, mark)), 1148 offsetof(struct __sk_buff, mark)),
1138 BPF_EXIT_INSN(), 1149 BPF_EXIT_INSN(),
1139 }, 1150 },
1140 .result = ACCEPT, 1151 .result = REJECT,
1141 .prog_type = BPF_PROG_TYPE_SK_SKB, 1152 .prog_type = BPF_PROG_TYPE_SK_SKB,
1153 .errstr = "invalid bpf_context access",
1142 }, 1154 },
1143 { 1155 {
1144 "check skb->tc_index is writeable by SK_SKB", 1156 "check skb->tc_index is writeable by SK_SKB",
@@ -6645,6 +6657,500 @@ static struct bpf_test tests[] = {
6645 .errstr = "BPF_END uses reserved fields", 6657 .errstr = "BPF_END uses reserved fields",
6646 .result = REJECT, 6658 .result = REJECT,
6647 }, 6659 },
6660 {
6661 "arithmetic ops make PTR_TO_CTX unusable",
6662 .insns = {
6663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6664 offsetof(struct __sk_buff, data) -
6665 offsetof(struct __sk_buff, mark)),
6666 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6667 offsetof(struct __sk_buff, mark)),
6668 BPF_EXIT_INSN(),
6669 },
6670 .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
6671 .result = REJECT,
6672 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6673 },
6674 {
6675 "XDP pkt read, pkt_end mangling, bad access 1",
6676 .insns = {
6677 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6678 offsetof(struct xdp_md, data)),
6679 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6680 offsetof(struct xdp_md, data_end)),
6681 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6682 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6683 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
6684 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6685 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6686 BPF_MOV64_IMM(BPF_REG_0, 0),
6687 BPF_EXIT_INSN(),
6688 },
6689 .errstr = "R1 offset is outside of the packet",
6690 .result = REJECT,
6691 .prog_type = BPF_PROG_TYPE_XDP,
6692 },
6693 {
6694 "XDP pkt read, pkt_end mangling, bad access 2",
6695 .insns = {
6696 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6697 offsetof(struct xdp_md, data)),
6698 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6699 offsetof(struct xdp_md, data_end)),
6700 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6702 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
6703 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6704 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6705 BPF_MOV64_IMM(BPF_REG_0, 0),
6706 BPF_EXIT_INSN(),
6707 },
6708 .errstr = "R1 offset is outside of the packet",
6709 .result = REJECT,
6710 .prog_type = BPF_PROG_TYPE_XDP,
6711 },
6712 {
6713 "XDP pkt read, pkt_data' > pkt_end, good access",
6714 .insns = {
6715 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6716 offsetof(struct xdp_md, data)),
6717 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6718 offsetof(struct xdp_md, data_end)),
6719 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6721 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6722 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6723 BPF_MOV64_IMM(BPF_REG_0, 0),
6724 BPF_EXIT_INSN(),
6725 },
6726 .result = ACCEPT,
6727 .prog_type = BPF_PROG_TYPE_XDP,
6728 },
6729 {
6730 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
6731 .insns = {
6732 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6733 offsetof(struct xdp_md, data)),
6734 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6735 offsetof(struct xdp_md, data_end)),
6736 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6737 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6738 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6739 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
6740 BPF_MOV64_IMM(BPF_REG_0, 0),
6741 BPF_EXIT_INSN(),
6742 },
6743 .errstr = "R1 offset is outside of the packet",
6744 .result = REJECT,
6745 .prog_type = BPF_PROG_TYPE_XDP,
6746 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6747 },
6748 {
6749 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
6750 .insns = {
6751 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6752 offsetof(struct xdp_md, data)),
6753 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6754 offsetof(struct xdp_md, data_end)),
6755 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6756 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6757 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
6758 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6759 BPF_MOV64_IMM(BPF_REG_0, 0),
6760 BPF_EXIT_INSN(),
6761 },
6762 .errstr = "R1 offset is outside of the packet",
6763 .result = REJECT,
6764 .prog_type = BPF_PROG_TYPE_XDP,
6765 },
6766 {
6767 "XDP pkt read, pkt_end > pkt_data', good access",
6768 .insns = {
6769 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6770 offsetof(struct xdp_md, data)),
6771 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6772 offsetof(struct xdp_md, data_end)),
6773 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6775 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
6776 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6777 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
6778 BPF_MOV64_IMM(BPF_REG_0, 0),
6779 BPF_EXIT_INSN(),
6780 },
6781 .result = ACCEPT,
6782 .prog_type = BPF_PROG_TYPE_XDP,
6783 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6784 },
6785 {
6786 "XDP pkt read, pkt_end > pkt_data', bad access 1",
6787 .insns = {
6788 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6789 offsetof(struct xdp_md, data)),
6790 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6791 offsetof(struct xdp_md, data_end)),
6792 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6794 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
6795 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6796 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6797 BPF_MOV64_IMM(BPF_REG_0, 0),
6798 BPF_EXIT_INSN(),
6799 },
6800 .errstr = "R1 offset is outside of the packet",
6801 .result = REJECT,
6802 .prog_type = BPF_PROG_TYPE_XDP,
6803 },
6804 {
6805 "XDP pkt read, pkt_end > pkt_data', bad access 2",
6806 .insns = {
6807 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6808 offsetof(struct xdp_md, data)),
6809 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6810 offsetof(struct xdp_md, data_end)),
6811 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6812 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6813 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
6814 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6815 BPF_MOV64_IMM(BPF_REG_0, 0),
6816 BPF_EXIT_INSN(),
6817 },
6818 .errstr = "R1 offset is outside of the packet",
6819 .result = REJECT,
6820 .prog_type = BPF_PROG_TYPE_XDP,
6821 },
6822 {
6823 "XDP pkt read, pkt_data' < pkt_end, good access",
6824 .insns = {
6825 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6826 offsetof(struct xdp_md, data)),
6827 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6828 offsetof(struct xdp_md, data_end)),
6829 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6830 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6831 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
6832 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6833 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
6834 BPF_MOV64_IMM(BPF_REG_0, 0),
6835 BPF_EXIT_INSN(),
6836 },
6837 .result = ACCEPT,
6838 .prog_type = BPF_PROG_TYPE_XDP,
6839 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6840 },
6841 {
6842 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
6843 .insns = {
6844 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6845 offsetof(struct xdp_md, data)),
6846 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6847 offsetof(struct xdp_md, data_end)),
6848 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6850 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
6851 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6852 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6853 BPF_MOV64_IMM(BPF_REG_0, 0),
6854 BPF_EXIT_INSN(),
6855 },
6856 .errstr = "R1 offset is outside of the packet",
6857 .result = REJECT,
6858 .prog_type = BPF_PROG_TYPE_XDP,
6859 },
6860 {
6861 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
6862 .insns = {
6863 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6864 offsetof(struct xdp_md, data)),
6865 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6866 offsetof(struct xdp_md, data_end)),
6867 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6869 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
6870 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6871 BPF_MOV64_IMM(BPF_REG_0, 0),
6872 BPF_EXIT_INSN(),
6873 },
6874 .errstr = "R1 offset is outside of the packet",
6875 .result = REJECT,
6876 .prog_type = BPF_PROG_TYPE_XDP,
6877 },
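	/* "pkt_end < pkt_data'": here the useful branch is the fall-through,
	 * which proves the non-strict data + 8 <= data_end, so the full
	 * 8-byte window is usable; a jump offset of 0 ("bad access 2") lets
	 * the load be reached from the unproven branch as well. */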
6878 {
6879 "XDP pkt read, pkt_end < pkt_data', good access",
6880 .insns = {
6881 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6882 offsetof(struct xdp_md, data)),
6883 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6884 offsetof(struct xdp_md, data_end)),
6885 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6886 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6887 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
6888 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6889 BPF_MOV64_IMM(BPF_REG_0, 0),
6890 BPF_EXIT_INSN(),
6891 },
6892 .result = ACCEPT,
6893 .prog_type = BPF_PROG_TYPE_XDP,
6894 },
6895 {
6896 "XDP pkt read, pkt_end < pkt_data', bad access 1",
6897 .insns = {
6898 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6899 offsetof(struct xdp_md, data)),
6900 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6901 offsetof(struct xdp_md, data_end)),
6902 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6903 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6904 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
6905 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
6906 BPF_MOV64_IMM(BPF_REG_0, 0),
6907 BPF_EXIT_INSN(),
6908 },
6909 .errstr = "R1 offset is outside of the packet",
6910 .result = REJECT,
6911 .prog_type = BPF_PROG_TYPE_XDP,
6912 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6913 },
6914 {
6915 "XDP pkt read, pkt_end < pkt_data', bad access 2",
6916 .insns = {
6917 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6918 offsetof(struct xdp_md, data)),
6919 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6920 offsetof(struct xdp_md, data_end)),
6921 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6923 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
6924 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6925 BPF_MOV64_IMM(BPF_REG_0, 0),
6926 BPF_EXIT_INSN(),
6927 },
6928 .errstr = "R1 offset is outside of the packet",
6929 .result = REJECT,
6930 .prog_type = BPF_PROG_TYPE_XDP,
6931 },
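	/* "pkt_data' >= pkt_end": the fall-through branch proves the strict
	 * data + 8 < data_end, hence only the 4-byte load at r1 - 5 is
	 * accepted; the 8-byte load, and any load reachable from the taken
	 * branch, is rejected. */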
6932 {
6933 "XDP pkt read, pkt_data' >= pkt_end, good access",
6934 .insns = {
6935 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6936 offsetof(struct xdp_md, data)),
6937 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6938 offsetof(struct xdp_md, data_end)),
6939 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6940 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6941 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
6942 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
6943 BPF_MOV64_IMM(BPF_REG_0, 0),
6944 BPF_EXIT_INSN(),
6945 },
6946 .result = ACCEPT,
6947 .prog_type = BPF_PROG_TYPE_XDP,
6948 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6949 },
6950 {
6951 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
6952 .insns = {
6953 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6954 offsetof(struct xdp_md, data)),
6955 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6956 offsetof(struct xdp_md, data_end)),
6957 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6958 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6959 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
6960 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6961 BPF_MOV64_IMM(BPF_REG_0, 0),
6962 BPF_EXIT_INSN(),
6963 },
6964 .errstr = "R1 offset is outside of the packet",
6965 .result = REJECT,
6966 .prog_type = BPF_PROG_TYPE_XDP,
6967 },
6968 {
6969 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
6970 .insns = {
6971 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6972 offsetof(struct xdp_md, data)),
6973 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6974 offsetof(struct xdp_md, data_end)),
6975 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6977 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
6978 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
6979 BPF_MOV64_IMM(BPF_REG_0, 0),
6980 BPF_EXIT_INSN(),
6981 },
6982 .errstr = "R1 offset is outside of the packet",
6983 .result = REJECT,
6984 .prog_type = BPF_PROG_TYPE_XDP,
6985 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6986 },
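	/* "pkt_end >= pkt_data'": the taken branch proves the non-strict
	 * relation, so the full 8-byte load at r1 - 8 is accepted there;
	 * without the JA the same load also sits on the unproven
	 * fall-through path and is rejected. */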
6987 {
6988 "XDP pkt read, pkt_end >= pkt_data', good access",
6989 .insns = {
6990 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6991 offsetof(struct xdp_md, data)),
6992 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6993 offsetof(struct xdp_md, data_end)),
6994 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6995 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6996 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
6997 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6998 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6999 BPF_MOV64_IMM(BPF_REG_0, 0),
7000 BPF_EXIT_INSN(),
7001 },
7002 .result = ACCEPT,
7003 .prog_type = BPF_PROG_TYPE_XDP,
7004 },
7005 {
7006 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
7007 .insns = {
7008 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7009 offsetof(struct xdp_md, data)),
7010 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7011 offsetof(struct xdp_md, data_end)),
7012 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7014 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7015 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7016 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7017 BPF_MOV64_IMM(BPF_REG_0, 0),
7018 BPF_EXIT_INSN(),
7019 },
7020 .errstr = "R1 offset is outside of the packet",
7021 .result = REJECT,
7022 .prog_type = BPF_PROG_TYPE_XDP,
7023 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7024 },
7025 {
7026 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
7027 .insns = {
7028 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7029 offsetof(struct xdp_md, data)),
7030 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7031 offsetof(struct xdp_md, data_end)),
7032 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7034 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7035 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7036 BPF_MOV64_IMM(BPF_REG_0, 0),
7037 BPF_EXIT_INSN(),
7038 },
7039 .errstr = "R1 offset is outside of the packet",
7040 .result = REJECT,
7041 .prog_type = BPF_PROG_TYPE_XDP,
7042 },
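	/* "pkt_data' <= pkt_end": mirror of the previous block with the
	 * packet pointer on the left; the taken branch again proves the
	 * non-strict relation and admits the full 8-byte load. */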
7043 {
7044 "XDP pkt read, pkt_data' <= pkt_end, good access",
7045 .insns = {
7046 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7047 offsetof(struct xdp_md, data)),
7048 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7049 offsetof(struct xdp_md, data_end)),
7050 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7051 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7052 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7053 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7054 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7055 BPF_MOV64_IMM(BPF_REG_0, 0),
7056 BPF_EXIT_INSN(),
7057 },
7058 .result = ACCEPT,
7059 .prog_type = BPF_PROG_TYPE_XDP,
7060 },
7061 {
7062 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
7063 .insns = {
7064 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7065 offsetof(struct xdp_md, data)),
7066 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7067 offsetof(struct xdp_md, data_end)),
7068 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7069 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7070 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7071 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7072 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7073 BPF_MOV64_IMM(BPF_REG_0, 0),
7074 BPF_EXIT_INSN(),
7075 },
7076 .errstr = "R1 offset is outside of the packet",
7077 .result = REJECT,
7078 .prog_type = BPF_PROG_TYPE_XDP,
7079 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7080 },
7081 {
7082 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
7083 .insns = {
7084 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7085 offsetof(struct xdp_md, data)),
7086 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7087 offsetof(struct xdp_md, data_end)),
7088 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7089 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7090 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7091 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7092 BPF_MOV64_IMM(BPF_REG_0, 0),
7093 BPF_EXIT_INSN(),
7094 },
7095 .errstr = "R1 offset is outside of the packet",
7096 .result = REJECT,
7097 .prog_type = BPF_PROG_TYPE_XDP,
7098 },
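	/* "pkt_end <= pkt_data'": the fall-through branch proves the strict
	 * data_end > data + 8, so only the narrower load passes; a jump
	 * offset of 0 exposes the load to the unproven branch and is
	 * rejected outright. */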
7099 {
7100 "XDP pkt read, pkt_end <= pkt_data', good access",
7101 .insns = {
7102 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7103 offsetof(struct xdp_md, data)),
7104 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7105 offsetof(struct xdp_md, data_end)),
7106 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7107 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7108 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7109 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7110 BPF_MOV64_IMM(BPF_REG_0, 0),
7111 BPF_EXIT_INSN(),
7112 },
7113 .result = ACCEPT,
7114 .prog_type = BPF_PROG_TYPE_XDP,
7115 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7116 },
7117 {
7118 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
7119 .insns = {
7120 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7121 offsetof(struct xdp_md, data)),
7122 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7123 offsetof(struct xdp_md, data_end)),
7124 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7126 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7127 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7128 BPF_MOV64_IMM(BPF_REG_0, 0),
7129 BPF_EXIT_INSN(),
7130 },
7131 .errstr = "R1 offset is outside of the packet",
7132 .result = REJECT,
7133 .prog_type = BPF_PROG_TYPE_XDP,
7134 },
7135 {
7136 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
7137 .insns = {
7138 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7139 offsetof(struct xdp_md, data)),
7140 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7141 offsetof(struct xdp_md, data_end)),
7142 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7143 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7144 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
7145 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7146 BPF_MOV64_IMM(BPF_REG_0, 0),
7147 BPF_EXIT_INSN(),
7148 },
7149 .errstr = "R1 offset is outside of the packet",
7150 .result = REJECT,
7151 .prog_type = BPF_PROG_TYPE_XDP,
7152 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7153 },
7154};
7155
7156static int probe_filter_length(const struct bpf_insn *fp)